From 7024628c65527e18528ed4007c560e6c96fe81de Mon Sep 17 00:00:00 2001 From: David Reid Date: Tue, 4 Feb 2020 22:43:31 +1000 Subject: [PATCH] Update to miniaudio 0.10 (#1092) * Update to miniaudio 0.10 This replaces the old ma_pcm_converter with ma_data_converter. At this time of this commit, miniaudio 0.10 is still in the testing phase. To make it easier to update miniaudio.h during this period, I've temporarily moved the @raysan5 Win32 customizations to raudio.c because there may be quite a few updates to miniaudio.h during this time. * Use miniaudio's built-in volume control. --- src/external/miniaudio.h | 20578 +++++++++++++++++++------------------ src/raudio.c | 359 +- 2 files changed, 11072 insertions(+), 9865 deletions(-) diff --git a/src/external/miniaudio.h b/src/external/miniaudio.h index 1927d96f2..2fc359c41 100644 --- a/src/external/miniaudio.h +++ b/src/external/miniaudio.h @@ -1,6 +1,6 @@ /* Audio playback and capture library. Choice of public domain or MIT-0. See license statements at the end of this file. -miniaudio (formerly mini_al) - v0.9.8 - 2019-10-07 +miniaudio (formerly mini_al) - v0.xx.xx - 2020-xx-xx David Reid - davidreidsoftware@gmail.com @@ -261,13 +261,8 @@ NOTES ===== - This library uses an asynchronous API for delivering and requesting audio data. Each device will have it's own worker thread which is managed by the library. -- If ma_device_init() is called with a device that's not aligned to the 4 bytes on 32-bit or 8 bytes on - 64-bit it will _not_ be thread-safe. The reason for this is that it depends on members of ma_device being - correctly aligned for atomic assignments. - Sample data is always native-endian and interleaved. For example, ma_format_s16 means signed 16-bit - integer samples, interleaved. Let me know if you need non-interleaved and I'll look into it. -- The sndio backend is currently only enabled on OpenBSD builds. -- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can use it. + integer samples, interleaved. - Automatic stream routing is enabled on a per-backend basis. Support is explicitly enabled for WASAPI and Core Audio, however other backends such as PulseAudio may naturally support it, though not all have been tested. @@ -277,6 +272,11 @@ NOTES - By default miniaudio will automatically clip samples. This only applies when the playback sample format is configured as ma_format_f32. If you are doing clipping yourself, you can disable this overhead by setting noClip to true in the device config. +- If ma_device_init() is called with a device that's not aligned to the 4 bytes on 32-bit or 8 bytes on + 64-bit it will _not_ be thread-safe. The reason for this is that it depends on members of ma_device being + correctly aligned for atomic assignments. +- The sndio backend is currently only enabled on OpenBSD builds. +- The audio(4) backend is supported on OpenBSD, but you may need to disable sndiod before you can use it. BACKEND NUANCES @@ -320,6 +320,7 @@ UWP Web Audio / Emscripten ---------------------- +- You cannot use -std=c* compiler flags, nor -ansi. This only applies to the Emscripten build. - The first time a context is initialized it will create a global object called "miniaudio" whose primary purpose is to act as a factory for device objects. - Currently the Web Audio backend uses ScriptProcessorNode's, but this may need to change later as they've been deprecated. 
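Regarding the noClip note in the NOTES section above: a minimal sketch of what disabling the built-in clipping might look like on the playback side. The `dataCallback` name and the exact config field layout are assumptions here, not something taken from this patch.

    ```c
    ma_device_config config = ma_device_config_init(ma_device_type_playback);
    config.playback.format = ma_format_f32;   /* automatic clipping only applies to f32 playback */
    config.dataCallback    = dataCallback;    /* hypothetical callback that clips its own output */
    config.noClip          = MA_TRUE;         /* skip miniaudio's automatic clipping */

    ma_device device;
    if (ma_device_init(NULL, &config, &device) != MA_SUCCESS) {
        // Error.
    }
    ```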
@@ -492,6 +493,7 @@ extern "C" { #else #define MA_POSIX #include /* Unfortunate #include, but needed for pthread_t, pthread_mutex_t and pthread_cond_t types. */ + #include #ifdef __unix__ #define MA_UNIX @@ -622,12 +624,6 @@ typedef ma_uint16 wchar_t; #define MA_ALIGN(alignment) #endif -#ifdef _MSC_VER -#define MA_ALIGNED_STRUCT(alignment) MA_ALIGN(alignment) struct -#else -#define MA_ALIGNED_STRUCT(alignment) struct MA_ALIGN(alignment) -#endif - /* SIMD alignment in bytes. Currently set to 64 bytes in preparation for future AVX-512 optimizations. */ #define MA_SIMD_ALIGNMENT 64 @@ -700,7 +696,7 @@ typedef ma_uint8 ma_channel; #define MA_CHANNEL_AUX_31 51 #define MA_CHANNEL_LEFT MA_CHANNEL_FRONT_LEFT #define MA_CHANNEL_RIGHT MA_CHANNEL_FRONT_RIGHT -#define MA_CHANNEL_POSITION_COUNT MA_CHANNEL_AUX_31 + 1 +#define MA_CHANNEL_POSITION_COUNT (MA_CHANNEL_AUX_31 + 1) typedef int ma_result; @@ -744,7 +740,8 @@ typedef int ma_result; #define MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE -310 #define MA_FAILED_TO_CREATE_MUTEX -311 #define MA_FAILED_TO_CREATE_EVENT -312 -#define MA_FAILED_TO_CREATE_THREAD -313 +#define MA_FAILED_TO_CREATE_SEMAPHORE -313 +#define MA_FAILED_TO_CREATE_THREAD -314 /* Standard sample rates. */ @@ -837,722 +834,628 @@ typedef enum } ma_performance_profile; -typedef struct ma_format_converter ma_format_converter; -typedef ma_uint32 (* ma_format_converter_read_proc) (ma_format_converter* pConverter, ma_uint32 frameCount, void* pFramesOut, void* pUserData); -typedef ma_uint32 (* ma_format_converter_read_deinterleaved_proc)(ma_format_converter* pConverter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData); - typedef struct { - ma_format formatIn; - ma_format formatOut; - ma_uint32 channels; - ma_stream_format streamFormatIn; - ma_stream_format streamFormatOut; - ma_dither_mode ditherMode; - ma_bool32 noSSE2 : 1; - ma_bool32 noAVX2 : 1; - ma_bool32 noAVX512 : 1; - ma_bool32 noNEON : 1; - ma_format_converter_read_proc onRead; - ma_format_converter_read_deinterleaved_proc onReadDeinterleaved; void* pUserData; -} ma_format_converter_config; - -struct ma_format_converter -{ - ma_format_converter_config config; - ma_bool32 useSSE2 : 1; - ma_bool32 useAVX2 : 1; - ma_bool32 useAVX512 : 1; - ma_bool32 useNEON : 1; - void (* onConvertPCM)(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode); - void (* onInterleavePCM)(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels); - void (* onDeinterleavePCM)(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels); -}; + void* (* onMalloc)(size_t sz, void* pUserData); + void* (* onRealloc)(void* p, size_t sz, void* pUserData); + void (* onFree)(void* p, void* pUserData); +} ma_allocation_callbacks; +/************************************************************************************************************************************************************** -typedef struct ma_channel_router ma_channel_router; -typedef ma_uint32 (* ma_channel_router_read_deinterleaved_proc)(ma_channel_router* pRouter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData); +Biquad Filtering +================ +Biquad filtering is achieved with the `ma_biquad` API. Example: -typedef struct -{ - ma_uint32 channelsIn; - ma_uint32 channelsOut; - ma_channel channelMapIn[MA_MAX_CHANNELS]; - ma_channel channelMapOut[MA_MAX_CHANNELS]; - ma_channel_mix_mode mixingMode; - float weights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. 
*/ - ma_bool32 noSSE2 : 1; - ma_bool32 noAVX2 : 1; - ma_bool32 noAVX512 : 1; - ma_bool32 noNEON : 1; - ma_channel_router_read_deinterleaved_proc onReadDeinterleaved; - void* pUserData; -} ma_channel_router_config; + ```c + ma_biquad_config config = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2); + ma_result result = ma_biquad_init(&config, &biquad); + if (result != MA_SUCCESS) { + // Error. + } -struct ma_channel_router -{ - ma_channel_router_config config; - ma_bool32 isPassthrough : 1; - ma_bool32 isSimpleShuffle : 1; - ma_bool32 isSimpleMonoExpansion : 1; - ma_bool32 isStereoToMono : 1; - ma_bool32 useSSE2 : 1; - ma_bool32 useAVX2 : 1; - ma_bool32 useAVX512 : 1; - ma_bool32 useNEON : 1; - ma_uint8 shuffleTable[MA_MAX_CHANNELS]; -}; + ... + ma_biquad_process_pcm_frames(&biquad, pFramesOut, pFramesIn, frameCount); + ``` +Biquad filtering is implemented using transposed direct form 2. The denominator coefficients are a0, a1 and a2, and the numerator coefficients are b0, b1 and +b2. The a0 coefficient is required and coefficients must not be pre-normalized. -typedef struct ma_src ma_src; -typedef ma_uint32 (* ma_src_read_deinterleaved_proc)(ma_src* pSRC, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData); /* Returns the number of frames that were read. */ +Supported formats are ma_format_s16 and ma_format_f32. If you need to use a different format you need to convert it yourself beforehand. When using +ma_format_s16 the biquad filter will use fixed point arithmetic. When using ma_format_f32, floating point arithmetic will be used. -typedef enum -{ - ma_src_algorithm_linear = 0, - ma_src_algorithm_sinc, - ma_src_algorithm_none, - ma_src_algorithm_default = ma_src_algorithm_linear -} ma_src_algorithm; +Input and output frames are always interleaved. -typedef enum -{ - ma_src_sinc_window_function_hann = 0, - ma_src_sinc_window_function_rectangular, - ma_src_sinc_window_function_default = ma_src_sinc_window_function_hann -} ma_src_sinc_window_function; +Filtering can be applied in-place by passing in the same pointer for both the input and output buffers, like so: -typedef struct + ```c + ma_biquad_process_pcm_frames(&biquad, pMyData, pMyData, frameCount); + ``` + +If you need to change the values of the coefficients, but maintain the values in the registers you can do so with `ma_biquad_reinit()`. This is useful if you +need to change the properties of the filter while keeping the values of registers valid to avoid glitching or whatnot. Do not use `ma_biquad_init()` for this +as it will do a full initialization which involves clearing the registers to 0. Note that changing the format or channel count after initialization is invalid +and will result in an error. 
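A minimal sketch of the `ma_biquad_reinit()` path described above, reusing the variable names from the earlier example; the new coefficient values are placeholders.

    ```c
    /* New coefficients, same format and channel count as the original config. */
    ma_biquad_config newConfig = ma_biquad_config_init(ma_format_f32, channels, b0, b1, b2, a0, a1, a2);

    ma_result result = ma_biquad_reinit(&newConfig, &biquad);  /* keeps the r1/r2 register state to avoid glitching */
    if (result != MA_SUCCESS) {
        // Error. Changing the format or channel count here is invalid.
    }
    ```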
+ +**************************************************************************************************************************************************************/ +typedef union { - ma_src_sinc_window_function windowFunction; - ma_uint32 windowWidth; -} ma_src_config_sinc; + float f32; + ma_int32 s32; +} ma_biquad_coefficient; typedef struct { - ma_uint32 sampleRateIn; - ma_uint32 sampleRateOut; + ma_format format; ma_uint32 channels; - ma_src_algorithm algorithm; - ma_bool32 neverConsumeEndOfInput : 1; - ma_bool32 noSSE2 : 1; - ma_bool32 noAVX2 : 1; - ma_bool32 noAVX512 : 1; - ma_bool32 noNEON : 1; - ma_src_read_deinterleaved_proc onReadDeinterleaved; - void* pUserData; - ma_src_config_sinc sinc; -} ma_src_config; - -MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_src -{ - union - { - struct - { - MA_ALIGN(MA_SIMD_ALIGNMENT) float input[MA_MAX_CHANNELS][MA_SRC_INPUT_BUFFER_SIZE_IN_SAMPLES]; - float timeIn; - ma_uint32 leftoverFrames; - } linear; - - struct - { - MA_ALIGN(MA_SIMD_ALIGNMENT) float input[MA_MAX_CHANNELS][MA_SRC_SINC_MAX_WINDOW_WIDTH*2 + MA_SRC_INPUT_BUFFER_SIZE_IN_SAMPLES]; - float timeIn; - ma_uint32 inputFrameCount; /* The number of frames sitting in the input buffer, not including the first half of the window. */ - ma_uint32 windowPosInSamples; /* An offset of . */ - float table[MA_SRC_SINC_MAX_WINDOW_WIDTH*1 * MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION]; /* Precomputed lookup table. The +1 is used to avoid the need for an overflow check. */ - } sinc; - }; - - ma_src_config config; - ma_bool32 isEndOfInputLoaded : 1; - ma_bool32 useSSE2 : 1; - ma_bool32 useAVX2 : 1; - ma_bool32 useAVX512 : 1; - ma_bool32 useNEON : 1; -}; + double b0; + double b1; + double b2; + double a0; + double a1; + double a2; +} ma_biquad_config; -typedef struct ma_pcm_converter ma_pcm_converter; -typedef ma_uint32 (* ma_pcm_converter_read_proc)(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData); +ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2); typedef struct { - ma_format formatIn; - ma_uint32 channelsIn; - ma_uint32 sampleRateIn; - ma_channel channelMapIn[MA_MAX_CHANNELS]; - ma_format formatOut; - ma_uint32 channelsOut; - ma_uint32 sampleRateOut; - ma_channel channelMapOut[MA_MAX_CHANNELS]; - ma_channel_mix_mode channelMixMode; - ma_dither_mode ditherMode; - ma_src_algorithm srcAlgorithm; - ma_bool32 allowDynamicSampleRate; - ma_bool32 neverConsumeEndOfInput : 1; /* <-- For SRC. */ - ma_bool32 noSSE2 : 1; - ma_bool32 noAVX2 : 1; - ma_bool32 noAVX512 : 1; - ma_bool32 noNEON : 1; - ma_pcm_converter_read_proc onRead; - void* pUserData; - union - { - ma_src_config_sinc sinc; - }; -} ma_pcm_converter_config; - -MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_pcm_converter -{ - ma_pcm_converter_read_proc onRead; - void* pUserData; - ma_format_converter formatConverterIn; /* For converting data to f32 in preparation for further processing. */ - ma_format_converter formatConverterOut; /* For converting data to the requested output format. Used as the final step in the processing pipeline. */ - ma_channel_router channelRouter; /* For channel conversion. */ - ma_src src; /* For sample rate conversion. */ - ma_bool32 isDynamicSampleRateAllowed : 1; /* ma_pcm_converter_set_input_sample_rate() and ma_pcm_converter_set_output_sample_rate() will fail if this is set to false. 
*/ - ma_bool32 isPreFormatConversionRequired : 1; - ma_bool32 isPostFormatConversionRequired : 1; - ma_bool32 isChannelRoutingRequired : 1; - ma_bool32 isSRCRequired : 1; - ma_bool32 isChannelRoutingAtStart : 1; - ma_bool32 isPassthrough : 1; /* <-- Will be set to true when the conversion pipeline is an optimized passthrough. */ -}; + ma_format format; + ma_uint32 channels; + ma_biquad_coefficient b0; + ma_biquad_coefficient b1; + ma_biquad_coefficient b2; + ma_biquad_coefficient a1; + ma_biquad_coefficient a2; + ma_biquad_coefficient r1[MA_MAX_CHANNELS]; + ma_biquad_coefficient r2[MA_MAX_CHANNELS]; +} ma_biquad; +ma_result ma_biquad_init(const ma_biquad_config* pConfig, ma_biquad* pBQ); +ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ); +ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +ma_uint32 ma_biquad_get_latency(ma_biquad* pBQ); -/************************************************************************************************************************************************************ -************************************************************************************************************************************************************* -DATA CONVERSION -=============== +/************************************************************************************************************************************************************** -This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc. +Low-Pass Filtering +================== +Low-pass filtering is achieved with the `ma_lpf` API. Example: -************************************************************************************************************************************************************* -************************************************************************************************************************************************************/ + ```c + ma_lpf_config config = ma_lpf_config_init(ma_format_f32, channels, sampleRate, cutoffFrequency); + ma_result result = ma_lpf_init(&config, &lpf); + if (result != MA_SUCCESS) { + // Error. + } -/************************************************************************************************************************************************************ + ... -Channel Maps -============ + ma_lpf_process_pcm_frames(&lpf, pFramesOut, pFramesIn, frameCount); + ``` -Below is the channel map used by ma_standard_channel_map_default: +Supported formats are ma_format_s16 and ma_format_f32. If you need to use a different format you need to convert it yourself beforehand. Input and output +frames are always interleaved. 
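Since only s16 and f32 are supported, any other format has to be converted first. A minimal sketch, assuming u8 input, a `pFramesF32` scratch buffer large enough for `frameCount * channels` samples, and `ma_pcm_convert()` (declared elsewhere in this header) for the conversion.

    ```c
    /* Convert u8 -> f32 (sample count = frames * channels), then filter in-place. */
    ma_pcm_convert(pFramesF32, ma_format_f32, pFramesU8, ma_format_u8, frameCount * channels, ma_dither_mode_none);
    ma_lpf_process_pcm_frames(&lpf, pFramesF32, pFramesF32, frameCount);
    ```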
-|---------------|------------------------------| -| Channel Count | Mapping | -|---------------|------------------------------| -| 1 (Mono) | 0: MA_CHANNEL_MONO | -|---------------|------------------------------| -| 2 (Stereo) | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -|---------------|------------------------------| -| 3 | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -| | 2: MA_CHANNEL_FRONT_CENTER | -|---------------|------------------------------| -| 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -| | 2: MA_CHANNEL_FRONT_CENTER | -| | 3: MA_CHANNEL_BACK_CENTER | -|---------------|------------------------------| -| 5 | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -| | 2: MA_CHANNEL_FRONT_CENTER | -| | 3: MA_CHANNEL_BACK_LEFT | -| | 4: MA_CHANNEL_BACK_RIGHT | -|---------------|------------------------------| -| 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -| | 2: MA_CHANNEL_FRONT_CENTER | -| | 3: MA_CHANNEL_LFE | -| | 4: MA_CHANNEL_SIDE_LEFT | -| | 5: MA_CHANNEL_SIDE_RIGHT | -|---------------|------------------------------| -| 7 | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -| | 2: MA_CHANNEL_FRONT_CENTER | -| | 3: MA_CHANNEL_LFE | -| | 4: MA_CHANNEL_BACK_CENTER | -| | 4: MA_CHANNEL_SIDE_LEFT | -| | 5: MA_CHANNEL_SIDE_RIGHT | -|---------------|------------------------------| -| 8 (7.1) | 0: MA_CHANNEL_FRONT_LEFT | -| | 1: MA_CHANNEL_FRONT_RIGHT | -| | 2: MA_CHANNEL_FRONT_CENTER | -| | 3: MA_CHANNEL_LFE | -| | 4: MA_CHANNEL_BACK_LEFT | -| | 5: MA_CHANNEL_BACK_RIGHT | -| | 6: MA_CHANNEL_SIDE_LEFT | -| | 7: MA_CHANNEL_SIDE_RIGHT | -|---------------|------------------------------| -| Other | All channels set to 0. This | -| | is equivalent to the same | -| | mapping as the device. | -|---------------|------------------------------| +Filtering can be applied in-place by passing in the same pointer for both the input and output buffers, like so: -************************************************************************************************************************************************************/ + ```c + ma_lpf_process_pcm_frames(&lpf, pMyData, pMyData, frameCount); + ``` -/* -Helper for retrieving a standard channel map. -*/ -void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]); +The low-pass filter is implemented as a biquad filter. If you need increase the filter order, simply chain multiple low-pass filters together. -/* -Copies a channel map. -*/ -void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels); + ```c + for (iFilter = 0; iFilter < filterCount; iFilter += 1) { + ma_lpf_process_pcm_frames(&lpf[iFilter], pMyData, pMyData, frameCount); + } + ``` +If you need to change the configuration of the filter, but need to maintain the state of internal registers you can do so with `ma_lpf_reinit()`. This may be +useful if you need to change the sample rate and/or cutoff frequency dynamically while maintaing smooth transitions. Note that changing the format or channel +count after initialization is invalid and will result in an error. -/* -Determines whether or not a channel map is valid. 
+**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRate; + double cutoffFrequency; +} ma_lpf_config; -A blank channel map is valid (all channels set to MA_CHANNEL_NONE). The way a blank channel map is handled is context specific, but -is usually treated as a passthrough. +ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency); -Invalid channel maps: - - A channel map with no channels - - A channel map with more than one channel and a mono channel -*/ -ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]); +typedef struct +{ + ma_biquad bq; /* The low-pass filter is implemented as a biquad filter. */ +} ma_lpf; -/* -Helper for comparing two channel maps for equality. +ma_result ma_lpf_init(const ma_lpf_config* pConfig, ma_lpf* pLPF); +ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF); +ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); +ma_uint32 ma_lpf_get_latency(ma_lpf* pLPF); -This assumes the channel count is the same between the two. -*/ -ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]); -/* -Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE). -*/ -ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]); +/************************************************************************************************************************************************************** -/* -Helper for determining whether or not a channel is present in the given channel map. -*/ -ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition); +Resampling +========== +Resampling is achieved with the `ma_resampler` object. To create a resampler object, do something like the following: + ```c + ma_resampler_config config = ma_resampler_config_init(ma_format_s16, channels, sampleRateIn, sampleRateOut, ma_resample_algorithm_linear); + ma_resampler resampler; + ma_result result = ma_resampler_init(&config, &resampler); + if (result != MA_SUCCESS) { + // An error occurred... + } + ``` -/************************************************************************************************************************************************************ +Do the following to uninitialize the resampler: -Format Conversion -================= -The format converter serves two purposes: - 1) Conversion between data formats (u8 to f32, etc.) - 2) Interleaving and deinterleaving + ```c + ma_resampler_uninit(&resampler); + ``` -When initializing a converter, you specify the input and output formats (u8, s16, etc.) and read callbacks. There are two read callbacks - one for -interleaved input data (onRead) and another for deinterleaved input data (onReadDeinterleaved). You implement whichever is most convenient for you. You -can implement both, but it's not recommended as it just introduces unnecessary complexity. +The following example shows how data can be processed -To read data as interleaved samples, use ma_format_converter_read(). Otherwise use ma_format_converter_read_deinterleaved(). 
+ ```c + ma_uint64 frameCountIn = 1000; + ma_uint64 frameCountOut = 2000; + ma_result result = ma_resampler_process_pcm_frames(&resampler, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut); + if (result != MA_SUCCESS) { + // An error occurred... + } -Dithering ---------- -The format converter also supports dithering. Dithering can be set using ditherMode variable in the config, like so. + // At this point, frameCountIn contains the number of input frames that were consumed and frameCountOut contains the number of output frames written. + ``` - pConfig->ditherMode = ma_dither_mode_rectangle; +To initialize the resampler you first need to set up a config (`ma_resampler_config`) with `ma_resampler_config_init()`. You need to specify the sample format +you want to use, the number of channels, the input and output sample rate, and the algorithm. -The different dithering modes include the following, in order of efficiency: - - None: ma_dither_mode_none - - Rectangle: ma_dither_mode_rectangle - - Triangle: ma_dither_mode_triangle +The sample format can be one of the following: + - ma_format_s16 + - ma_format_f32 -Note that even if the dither mode is set to something other than ma_dither_mode_none, it will be ignored for conversions where dithering is not needed. -Dithering is available for the following conversions: - - s16 -> u8 - - s24 -> u8 - - s32 -> u8 - - f32 -> u8 - - s24 -> s16 - - s32 -> s16 - - f32 -> s16 +If you need a different format you will need to perform pre- and post-conversions yourself where necessary. Note that the format is the same for both input +and output. The format cannot be changed after initialization. -Note that it is not an error to pass something other than ma_dither_mode_none for conversions where dither is not used. It will just be ignored. +The resampler supports multiple channels and is always interleaved (both input and output). The channel count cannot be changed after initialization. -************************************************************************************************************************************************************/ +The sample rates can be anything other than zero, and are always specified in hertz. They should be set to something like 44100, etc. The sample rate is the +only configuration property that can be changed after initialization. -/* -Initializes a format converter. -*/ -ma_result ma_format_converter_init(const ma_format_converter_config* pConfig, ma_format_converter* pConverter); +The miniaudio resampler supports multiple algorithms: + - Linear: ma_resample_algorithm_linear + - Speex: ma_resample_algorithm_speex -/* -Reads data from the format converter as interleaved channels. -*/ -ma_uint64 ma_format_converter_read(ma_format_converter* pConverter, ma_uint64 frameCount, void* pFramesOut, void* pUserData); +Because Speex is not public domain it is strictly opt-in and the code is stored in separate files. if you opt-in to the Speex backend you will need to consider +it's license, the text of which can be found in it's source files in "extras/speex_resampler". Details on how to opt-in to the Speex resampler is explained in +the Speex Resampler section below. -/* -Reads data from the format converter as deinterleaved channels. -*/ -ma_uint64 ma_format_converter_read_deinterleaved(ma_format_converter* pConverter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData); +The algorithm cannot be changed after initialization. -/* -Helper for initializing a format converter config. 
-*/ -ma_format_converter_config ma_format_converter_config_init_new(void); -ma_format_converter_config ma_format_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_proc onRead, void* pUserData); -ma_format_converter_config ma_format_converter_config_init_deinterleaved(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_deinterleaved_proc onReadDeinterleaved, void* pUserData); +Processing always happens on a per PCM frame basis and always assumed interleaved input and output. De-interleaved processing is not supported. To process +frames, use `ma_resampler_process_pcm_frames()`. On input, this function takes the number of output frames you can fit in the output buffer and the number of +input frames contained in the input buffer. On output these variables contain the number of output frames that were written to the output buffer and the +number of input frames that were consumed in the process. You can pass in NULL for the input buffer in which case it will be treated as an infinitely large +buffer of zeros. The output buffer can also be NULL, in which case the processing will be treated as seek. +The sample rate can be changed dynamically on the fly. You can change this with explicit sample rates with `ma_resampler_set_rate()` and also with a decimal +ratio with `ma_resampler_set_rate_ratio()`. The ratio is in/out. +Sometimes it's useful to know exactly how many input frames will be required to output a specific number of frames. You can calculate this with +`ma_resampler_get_required_input_frame_count()`. Likewise, it's sometimes useful to know exactly how many frames would be output given a certain number of +input frames. You can do this with `ma_resampler_get_expected_output_frame_count()`. -/************************************************************************************************************************************************************ +Due to the nature of how resampling works, the resampler introduces some latency. This can be retrieved in terms of both the input rate and the output rate +with `ma_resampler_get_input_latency()` and `ma_resampler_get_output_latency()`. -Channel Routing -=============== -There are two main things you can do with the channel router: - 1) Rearrange channels - 2) Convert from one channel count to another -Channel Rearrangement +Resampling Algorithms --------------------- -A simple example of channel rearrangement may be swapping the left and right channels in a stereo stream. To do this you just pass in the same channel -count for both the input and output with channel maps that contain the same channels (in a different order). - -Channel Conversion ------------------- -The channel router can also convert from one channel count to another, such as converting a 5.1 stream to stero. When changing the channel count, the -router will first perform a 1:1 mapping of channel positions that are present in both the input and output channel maps. The second thing it will do -is distribute the input mono channel (if any) across all output channels, excluding any None and LFE channels. If there is an output mono channel, all -input channels will be averaged, excluding any None and LFE channels. - -The last case to consider is when a channel position in the input channel map is not present in the output channel map, and vice versa. In this case the -channel router will perform a blend of other related channels to produce an audible channel. 
There are several blending modes. - 1) Simple - Unmatched channels are silenced. - 2) Planar Blending - Channels are blended based on a set of planes that each speaker emits audio from. - -Rectangular / Planar Blending ------------------------------ -In this mode, channel positions are associated with a set of planes where the channel conceptually emits audio from. An example is the front/left speaker. -This speaker is positioned to the front of the listener, so you can think of it as emitting audio from the front plane. It is also positioned to the left -of the listener so you can think of it as also emitting audio from the left plane. Now consider the (unrealistic) situation where the input channel map -contains only the front/left channel position, but the output channel map contains both the front/left and front/center channel. When deciding on the audio -data to send to the front/center speaker (which has no 1:1 mapping with an input channel) we need to use some logic based on our available input channel -positions. - -As mentioned earlier, our front/left speaker is, conceptually speaking, emitting audio from the front _and_ the left planes. Similarly, the front/center -speaker is emitting audio from _only_ the front plane. What these two channels have in common is that they are both emitting audio from the front plane. -Thus, it makes sense that the front/center speaker should receive some contribution from the front/left channel. How much contribution depends on their -planar relationship (thus the name of this blending technique). - -Because the front/left channel is emitting audio from two planes (front and left), you can think of it as though it's willing to dedicate 50% of it's total -volume to each of it's planes (a channel position emitting from 1 plane would be willing to given 100% of it's total volume to that plane, and a channel -position emitting from 3 planes would be willing to given 33% of it's total volume to each plane). Similarly, the front/center speaker is emitting audio -from only one plane so you can think of it as though it's willing to _take_ 100% of it's volume from front plane emissions. Now, since the front/left -channel is willing to _give_ 50% of it's total volume to the front plane, and the front/center speaker is willing to _take_ 100% of it's total volume -from the front, you can imagine that 50% of the front/left speaker will be given to the front/center speaker. - -Usage ------ -To use the channel router you need to specify three things: - 1) The input channel count and channel map - 2) The output channel count and channel map - 3) The mixing mode to use in the case where a 1:1 mapping is unavailable +The choice of resampling algorithm depends on your situation and requirements. The linear resampler is the most efficient and has the least amount of latency, +but at the expense of poorer quality. The Speex resampler is higher quality, but slower with more latency. It also performs several heap applications +internally for memory management. -Note that input and output data is always deinterleaved 32-bit floating point. -Initialize the channel router with ma_channel_router_init(). You will need to pass in a config object which specifies the input and output configuration, -mixing mode and a callback for sending data to the router. This callback will be called when input data needs to be sent to the router for processing. Note -that the mixing mode is only used when a 1:1 mapping is unavailable. This includes the custom weights mode. 
+Linear Resampling +----------------- +The linear resampler is the fastest, but comes at the expense of poorer quality. There is, however, some control over the quality of the linear resampler which +may make it a suitable option depending on your requirements. -Read data from the channel router with ma_channel_router_read_deinterleaved(). Output data is always 32-bit floating point. +The linear resampler performs low-pass filtering before or after downsampling or upsampling, depending on the sample rates you're converting between. When +decreasing the sample rate, the low-pass filter will be applied before downsampling. When increasing the rate it will be performed after upsampling. By default +a second order low-pass filter will be applied. To improve quality you can chain low-pass filters together, up to a maximum of `MA_MAX_RESAMPLER_LPF_FILTERS`. +This comes at the expense of increased computational cost and latency. You can also disable filtering altogether by setting the filter count to 0. The filter +count is controlled with the `lpfCount` config variable. -************************************************************************************************************************************************************/ +The low-pass filter has a cutoff frequency which defaults to half the sample rate of the lowest of the input and output sample rates (Nyquist Frequency). This +can be controlled with the `lpfNyquistFactor` config variable. This defaults to 1, and should be in the range of 0..1, although a value of 0 does not make +sense and should be avoided. A value of 1 will use the Nyquist Frequency as the cutoff. A value of 0.5 will use half the Nyquist Frequency as the cutoff, etc. +Values less than 1 will result in more washed out sound due to more of the higher frequencies being removed. This config variable has no impact on performance +and is a purely perceptual configuration. -/* -Initializes a channel router where it is assumed that the input data is non-interleaved. -*/ -ma_result ma_channel_router_init(const ma_channel_router_config* pConfig, ma_channel_router* pRouter); +The API for the linear resampler is the same as the main resampler API, only it's called `ma_linear_resampler`. -/* -Reads data from the channel router as deinterleaved channels. -*/ -ma_uint64 ma_channel_router_read_deinterleaved(ma_channel_router* pRouter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData); -/* -Helper for initializing a channel router config. -*/ -ma_channel_router_config ma_channel_router_config_init(ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode, ma_channel_router_read_deinterleaved_proc onRead, void* pUserData); +Speex Resampling +---------------- +The Speex resampler is made up of third party code which is released under the BSD license. Because it is licensed differently to miniaudio, which is public +domain, it is strictly opt-in and all of it's code is stored in separate files. If you opt-in to the Speex resampler you must consider the license text in it's +source files. To opt-in, you must first #include the following file before the implementation of miniaudio.h: + #include "extras/speex_resampler/ma_speex_resampler.h" -/************************************************************************************************************************************************************ +Both the header and implementation is contained within the same file. 
To implementation can be included in your program like so: -Sample Rate Conversion -====================== + #define MINIAUDIO_SPEEX_RESAMPLER_IMPLEMENTATION + #include "extras/speex_resampler/ma_speex_resampler.h" -************************************************************************************************************************************************************/ +Note that even if you opt-in to the Speex backend, miniaudio won't use it unless you explicitly ask for it in the respective config of the object you are +initializing. If you try to use the Speex resampler without opting in, initialization of the `ma_resampler` object will fail with `MA_NO_BACKEND`. -/* -Initializes a sample rate conversion object. -*/ -ma_result ma_src_init(const ma_src_config* pConfig, ma_src* pSRC); +The only configuration option to consider with the Speex resampler is the `speex.quality` config variable. This is a value between 0 and 10, with 0 being +the worst/fastest and 10 being the best/slowest. The default value is 3. -/* -Dynamically adjusts the sample rate. +**************************************************************************************************************************************************************/ +#ifndef MA_MAX_RESAMPLER_LPF_FILTERS +#define MA_MAX_RESAMPLER_LPF_FILTERS 4 +#endif -This is useful for dynamically adjust pitch. Keep in mind, however, that this will speed up or slow down the sound. If this -is not acceptable you will need to use your own algorithm. -*/ -ma_result ma_src_set_sample_rate(ma_src* pSRC, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +typedef struct +{ + ma_format format; + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_uint32 lpfCount; /* How many low-pass filters to chain together. A single low-pass filter is second order. Setting this to 0 will disable low-pass filtering. */ + double lpfNyquistFactor; /* 0..1. Defaults to 1. 1 = Half the sampling frequency (Nyquist Frequency), 0.5 = Quarter the sampling frequency (half Nyquest Frequency), etc. */ +} ma_linear_resampler_config; -/* -Reads a number of frames. +ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); -Returns the number of frames actually read. -*/ -ma_uint64 ma_src_read_deinterleaved(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData); +typedef struct +{ + ma_linear_resampler_config config; + ma_uint32 inAdvanceInt; + ma_uint32 inAdvanceFrac; + ma_uint32 inTimeInt; + ma_uint32 inTimeFrac; + union + { + float f32[MA_MAX_CHANNELS]; + ma_int16 s16[MA_MAX_CHANNELS]; + } x0; /* The previous input frame. */ + union + { + float f32[MA_MAX_CHANNELS]; + ma_int16 s16[MA_MAX_CHANNELS]; + } x1; /* The next input frame. 
*/ + ma_lpf lpf[MA_MAX_RESAMPLER_LPF_FILTERS]; +} ma_linear_resampler; + +ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, ma_linear_resampler* pResampler); +void ma_linear_resampler_uninit(ma_linear_resampler* pResampler); +ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); +ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut); +ma_uint64 ma_linear_resampler_get_required_input_frame_count(ma_linear_resampler* pResampler, ma_uint64 outputFrameCount); +ma_uint64 ma_linear_resampler_get_expected_output_frame_count(ma_linear_resampler* pResampler, ma_uint64 inputFrameCount); +ma_uint64 ma_linear_resampler_get_input_latency(ma_linear_resampler* pResampler); +ma_uint64 ma_linear_resampler_get_output_latency(ma_linear_resampler* pResampler); -/* -Helper for creating a sample rate conversion config. -*/ -ma_src_config ma_src_config_init_new(void); -ma_src_config ma_src_config_init(ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_uint32 channels, ma_src_read_deinterleaved_proc onReadDeinterleaved, void* pUserData); +typedef enum +{ + ma_resample_algorithm_linear = 0, /* Fastest, lowest quality. Optional low-pass filtering. Default. */ + ma_resample_algorithm_speex +} ma_resample_algorithm; +typedef struct +{ + ma_format format; /* Must be either ma_format_f32 or ma_format_s16. */ + ma_uint32 channels; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfCount; + double lpfNyquistFactor; + } linear; + struct + { + int quality; /* 0 to 10. Defaults to 3. */ + } speex; +} ma_resampler_config; -/************************************************************************************************************************************************************ +ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm); -Conversion +typedef struct +{ + ma_resampler_config config; + union + { + ma_linear_resampler linear; + struct + { + void* pSpeexResamplerState; /* SpeexResamplerState* */ + } speex; + } state; +} ma_resampler; -************************************************************************************************************************************************************/ +/* +Initializes a new resampler object from a config. +*/ +ma_result ma_resampler_init(const ma_resampler_config* pConfig, ma_resampler* pResampler); /* -Initializes a DSP object. +Uninitializes a resampler. */ -ma_result ma_pcm_converter_init(const ma_pcm_converter_config* pConfig, ma_pcm_converter* pDSP); +void ma_resampler_uninit(ma_resampler* pResampler); /* -Dynamically adjusts the input sample rate. +Converts the given input data. -This will fail is the DSP was not initialized with allowDynamicSampleRate. +Both the input and output frames must be in the format specified in the config when the resampler was initilized. -DEPRECATED. Use ma_pcm_converter_set_sample_rate() instead. -*/ -ma_result ma_pcm_converter_set_input_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateOut); +On input, [pFrameCountOut] contains the number of output frames to process. 
On output it contains the number of output frames that +were actually processed, which may be less than the requested amount which will happen if there's not enough input data. You can use +ma_resampler_get_expected_output_frame_count() to know how many output frames will be processed for a given number of input frames. -/* -Dynamically adjusts the output sample rate. +On input, [pFrameCountIn] contains the number of input frames contained in [pFramesIn]. On output it contains the number of whole +input frames that were actually processed. You can use ma_resampler_get_required_input_frame_count() to know how many input frames +you should provide for a given number of output frames. [pFramesIn] can be NULL, in which case zeroes will be used instead. -This is useful for dynamically adjust pitch. Keep in mind, however, that this will speed up or slow down the sound. If this -is not acceptable you will need to use your own algorithm. +If [pFramesOut] is NULL, a seek is performed. In this case, if [pFrameCountOut] is not NULL it will seek by the specified number of +output frames. Otherwise, if [pFramesCountOut] is NULL and [pFrameCountIn] is not NULL, it will seek by the specified number of input +frames. When seeking, [pFramesIn] is allowed to NULL, in which case the internal timing state will be updated, but no input will be +processed. In this case, any internal filter state will be updated as if zeroes were passed in. -This will fail is the DSP was not initialized with allowDynamicSampleRate. +It is an error for [pFramesOut] to be non-NULL and [pFrameCountOut] to be NULL. -DEPRECATED. Use ma_pcm_converter_set_sample_rate() instead. +It is an error for both [pFrameCountOut] and [pFrameCountIn] to be NULL. */ -ma_result ma_pcm_converter_set_output_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateOut); +ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); + /* -Dynamically adjusts the output sample rate. +Sets the input and output sample sample rate. +*/ +ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); -This is useful for dynamically adjust pitch. Keep in mind, however, that this will speed up or slow down the sound. If this -is not acceptable you will need to use your own algorithm. +/* +Sets the input and output sample rate as a ratio. -This will fail if the DSP was not initialized with allowDynamicSampleRate. +The ration is in/out. */ -ma_result ma_pcm_converter_set_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio); + /* -Reads a number of frames and runs them through the DSP processor. +Calculates the number of whole input frames that would need to be read from the client in order to output the specified +number of output frames. + +The returned value does not include cached input frames. It only returns the number of extra frames that would need to be +read from the input buffer in order to output the specified number of output frames. */ -ma_uint64 ma_pcm_converter_read(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint64 frameCount); +ma_uint64 ma_resampler_get_required_input_frame_count(ma_resampler* pResampler, ma_uint64 outputFrameCount); /* -Helper for initializing a ma_pcm_converter_config object. 
+Calculates the number of whole output frames that would be output after fully reading and consuming the specified number of +input frames. */ -ma_pcm_converter_config ma_pcm_converter_config_init_new(void); -ma_pcm_converter_config ma_pcm_converter_config_init(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_pcm_converter_read_proc onRead, void* pUserData); -ma_pcm_converter_config ma_pcm_converter_config_init_ex(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], ma_pcm_converter_read_proc onRead, void* pUserData); +ma_uint64 ma_resampler_get_expected_output_frame_count(ma_resampler* pResampler, ma_uint64 inputFrameCount); -/* -High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to -determine the required size of the output buffer. -A return value of 0 indicates an error. +/* +Retrieves the latency introduced by the resampler in input frames. +*/ +ma_uint64 ma_resampler_get_input_latency(ma_resampler* pResampler); -This function is useful for one-off bulk conversions, but if you're streaming data you should use the ma_pcm_converter APIs instead. +/* +Retrieves the latency introduced by the resampler in output frames. */ -ma_uint64 ma_convert_frames(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_uint64 frameCount); -ma_uint64 ma_convert_frames_ex(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint64 frameCount); +ma_uint64 ma_resampler_get_output_latency(ma_resampler* pResampler); -/************************************************************************************************************************************************************ -Ring Buffer -=========== +/************************************************************************************************************************************************************** -Features --------- -- Lock free (assuming single producer, single consumer) -- Support for interleaved and deinterleaved streams -- Allows the caller to allocate their own block of memory +Channel Conversion +================== +Channel conversion is used for channel rearrangement and conversion from one channel count to another. The `ma_channel_converter` API is used for channel +conversion. Below is an example of initializing a simple channel converter which converts from mono to stereo. -Usage ------ -- Call ma_rb_init() to initialize a simple buffer, with an optional pre-allocated buffer. If you pass in NULL - for the pre-allocated buffer, it will be allocated for you and free()'d in ma_rb_uninit(). If you pass in - your own pre-allocated buffer, free()-ing is left to you. + ```c + ma_channel_converter_config config = ma_channel_converter_config_init(ma_format, 1, NULL, 2, NULL, ma_channel_mix_mode_default, NULL); + result = ma_channel_converter_init(&config, &converter); + if (result != MA_SUCCESS) { + // Error. + } + ``` -- Call ma_rb_init_ex() if you need a deinterleaved buffer. 
The data for each sub-buffer is offset from each - other based on the stride. Use ma_rb_get_subbuffer_stride(), ma_rb_get_subbuffer_offset() and - ma_rb_get_subbuffer_ptr() to manage your sub-buffers. +To process perform the conversion simply call `ma_channel_converter_process_pcm_frames()` like so: -- Use ma_rb_acquire_read() and ma_rb_acquire_write() to retrieve a pointer to a section of the ring buffer. - You specify the number of bytes you need, and on output it will set to what was actually acquired. If the - read or write pointer is positioned such that the number of bytes requested will require a loop, it will be - clamped to the end of the buffer. Therefore, the number of bytes you're given may be less than the number - you requested. + ```c + ma_result result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount); + if (result != MA_SUCCESS) { + // Error. + } + ``` -- After calling ma_rb_acquire_read/write(), you do your work on the buffer and then "commit" it with - ma_rb_commit_read/write(). This is where the read/write pointers are updated. When you commit you need to - pass in the buffer that was returned by the earlier call to ma_rb_acquire_read/write() and is only used - for validation. The number of bytes passed to ma_rb_commit_read/write() is what's used to increment the - pointers. +It is up to the caller to ensure the output buffer is large enough to accomodate the new PCM frames. -- If you want to correct for drift between the write pointer and the read pointer you can use a combination - of ma_rb_pointer_distance(), ma_rb_seek_read() and ma_rb_seek_write(). Note that you can only move the - pointers forward, and you should only move the read pointer forward via the consumer thread, and the write - pointer forward by the producer thread. If there is too much space between the pointers, move the read - pointer forward. If there is too little space between the pointers, move the write pointer forward. +The only formats supported are ma_format_s16 and ma_format_f32. If you need another format you need to convert your data manually which you can do with +ma_pcm_convert(), etc. +Input and output PCM frames are always interleaved. Deinterleaved layouts are not supported. -Notes ------ -- Thread safety depends on a single producer, single consumer model. Only one thread is allowed to write, and - only one thread is allowed to read. The producer is the only one allowed to move the write pointer, and the - consumer is the only one allowed to move the read pointer. -- Operates on bytes. Use ma_pcm_rb to operate in terms of PCM frames. -- Maximum buffer size in bytes is 0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1) because of reasons. +Channel Mapping +--------------- +In addition to converting from one channel count to another, like the example above, The channel converter can also be used to rearrange channels. When +initializing the channel converter, you can optionally pass in channel maps for both the input and output frames. If the channel counts are the same, and each +channel map contains the same channel positions with the exception that they're in a different order, a simple shuffling of the channels with be performed. If, +however, there is not a 1:1 mapping of channel positions, or the channel counts differ, the input channels will be mixed based on a mixing +mode which is specified when initializing the `ma_channel_converter_config` object. 
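As a concrete case of the shuffle path described above, here is a minimal sketch that swaps the left and right channels of a stereo stream. The argument list follows the `ma_channel_converter_config_init()` declaration further below; the buffer names are placeholders.

    ```c
    ma_channel mapIn[MA_MAX_CHANNELS]  = { MA_CHANNEL_FRONT_LEFT,  MA_CHANNEL_FRONT_RIGHT };
    ma_channel mapOut[MA_MAX_CHANNELS] = { MA_CHANNEL_FRONT_RIGHT, MA_CHANNEL_FRONT_LEFT };

    ma_channel_converter_config config = ma_channel_converter_config_init(ma_format_f32, 2, mapIn, 2, mapOut, ma_channel_mix_mode_default);
    ma_channel_converter converter;
    if (ma_channel_converter_init(&config, &converter) != MA_SUCCESS) {
        // Error.
    }

    /* Same channel count and same positions in a different order, so this runs as a simple shuffle. */
    ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount);
    ```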
-PCM Ring Buffer -=============== -This is the same as the regular ring buffer, except that it works on PCM frames instead of bytes. +When converting from mono to multi-channel, the mono channel is simply copied to each output channel. When going the other way around, the audio of each output +channel is simply averaged and copied to the mono channel. -************************************************************************************************************************************************************/ -typedef struct -{ - void* pBuffer; - ma_uint32 subbufferSizeInBytes; - ma_uint32 subbufferCount; - ma_uint32 subbufferStrideInBytes; - volatile ma_uint32 encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */ - volatile ma_uint32 encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */ - ma_bool32 ownsBuffer : 1; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */ - ma_bool32 clearOnWriteAcquire : 1; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). */ -} ma_rb; +In more complicated cases blending is used. The `ma_channel_mix_mode_simple` mode will drop excess channels and silence extra channels. For example, converting +from 4 to 2 channels, the 3rd and 4th channels will be dropped, whereas converting from 2 to 4 channels will put silence into the 3rd and 4th channels. -ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB); -ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB); -void ma_rb_uninit(ma_rb* pRB); -void ma_rb_reset(ma_rb* pRB); -ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); -ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut); -ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); -ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut); -ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes); -ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes); -ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. Should never be negative for a correct program. Will return the number of bytes that can be read before the read pointer hits the write pointer. */ -ma_uint32 ma_rb_available_read(ma_rb* pRB); -ma_uint32 ma_rb_available_write(ma_rb* pRB); -size_t ma_rb_get_subbuffer_size(ma_rb* pRB); -size_t ma_rb_get_subbuffer_stride(ma_rb* pRB); -size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex); -void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer); +The `ma_channel_mix_mode_rectangle` mode uses spacial locality based on a rectangle to compute a simple distribution between input and output. Imagine sitting +in the middle of a room, with speakers on the walls representing channel positions. The MA_CHANNEL_FRONT_LEFT position can be thought of as being in the corner +of the front and left walls. +Finally, the `ma_channel_mix_mode_custom_weights` mode can be used to use custom user-defined weights. Custom weights can be passed in as the last parameter of +`ma_channel_converter_config_init()`. 
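A minimal sketch of the custom weights mode, assuming a hypothetical stereo-to-mono downmix. Rather than passing the weights through `ma_channel_converter_config_init()`, this fills the `weights[in][out]` field on the config struct declared below; treat that as one possible way to supply them.

    ```c
    ma_channel_converter_config config = ma_channel_converter_config_init(ma_format_f32, 2, NULL, 1, NULL, ma_channel_mix_mode_custom_weights);
    config.weights[0][0] = 0.7f;   /* left  -> mono */
    config.weights[1][0] = 0.3f;   /* right -> mono */

    ma_channel_converter converter;
    if (ma_channel_converter_init(&config, &converter) != MA_SUCCESS) {
        // Error.
    }
    ```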
+**************************************************************************************************************************************************************/ typedef struct { - ma_rb rb; ma_format format; - ma_uint32 channels; -} ma_pcm_rb; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_channel_mix_mode mixingMode; + float weights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when mixingMode is set to ma_channel_mix_mode_custom_weights. */ +} ma_channel_converter_config; -ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB); -ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB); -void ma_pcm_rb_uninit(ma_pcm_rb* pRB); -void ma_pcm_rb_reset(ma_pcm_rb* pRB); -ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); -ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut); -ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); -ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut); -ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); -ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); -ma_int32 ma_pcm_rb_pointer_disance(ma_pcm_rb* pRB); /* Return value is in frames. */ -ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB); -ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB); -ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB); -ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB); -ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex); -void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer); +ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode); +typedef struct +{ + ma_format format; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_channel_mix_mode mixingMode; + union + { + float f32[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; + ma_int32 s16[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; + } weights; + ma_bool32 isPassthrough : 1; + ma_bool32 isSimpleShuffle : 1; + ma_bool32 isSimpleMonoExpansion : 1; + ma_bool32 isStereoToMono : 1; + ma_uint8 shuffleTable[MA_MAX_CHANNELS]; +} ma_channel_converter; -/************************************************************************************************************************************************************ +ma_result ma_channel_converter_init(const ma_channel_converter_config* pConfig, ma_channel_converter* pConverter); +void ma_channel_converter_uninit(ma_channel_converter* pConverter); +ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount); -Miscellaneous Helpers -************************************************************************************************************************************************************/ 
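+Putting the above together, a minimal end-to-end sketch using the declarations above might look like the following. It assumes NULL channel maps select default maps, and that `pFramesIn`/`pFramesOut` are interleaved s16 buffers allocated by the caller, with `pFramesOut` large enough for `frameCount` frames of 2 channels:
+
+    ```c
+    ma_channel_converter_config config = ma_channel_converter_config_init(ma_format_s16, 6, NULL, 2, NULL, ma_channel_mix_mode_simple);
+
+    ma_channel_converter converter;
+    ma_result result = ma_channel_converter_init(&config, &converter);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    result = ma_channel_converter_process_pcm_frames(&converter, pFramesOut, pFramesIn, frameCount);
+    if (result != MA_SUCCESS) {
+        // Error.
+    }
+
+    ma_channel_converter_uninit(&converter);
+    ```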
+/************************************************************************************************************************************************************** -/* -malloc(). Calls MA_MALLOC(). -*/ -void* ma_malloc(size_t sz); +Data Conversion +=============== -/* -realloc(). Calls MA_REALLOC(). -*/ -void* ma_realloc(void* p, size_t sz); +**************************************************************************************************************************************************************/ +typedef struct +{ + ma_format formatIn; + ma_format formatOut; + ma_uint32 channelsIn; + ma_uint32 channelsOut; + ma_uint32 sampleRateIn; + ma_uint32 sampleRateOut; + ma_channel channelMapIn[MA_MAX_CHANNELS]; + ma_channel channelMapOut[MA_MAX_CHANNELS]; + ma_dither_mode ditherMode; + ma_channel_mix_mode channelMixMode; + float channelWeights[MA_MAX_CHANNELS][MA_MAX_CHANNELS]; /* [in][out]. Only used when channelMixMode is set to ma_channel_mix_mode_custom_weights. */ + struct + { + ma_resample_algorithm algorithm; + ma_bool32 allowDynamicSampleRate; + struct + { + ma_uint32 lpfCount; + double lpfNyquistFactor; + } linear; + struct + { + int quality; + } speex; + } resampling; +} ma_data_converter_config; -/* -free(). Calls MA_FREE(). -*/ -void ma_free(void* p); +ma_data_converter_config ma_data_converter_config_init_default(void); +ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); -/* -Performs an aligned malloc, with the assumption that the alignment is a power of 2. -*/ -void* ma_aligned_malloc(size_t sz, size_t alignment); +typedef struct +{ + ma_data_converter_config config; + ma_channel_converter channelConverter; + ma_resampler resampler; + ma_bool32 hasPreFormatConversion : 1; + ma_bool32 hasPostFormatConversion : 1; + ma_bool32 hasChannelConverter : 1; + ma_bool32 hasResampler : 1; + ma_bool32 isPassthrough : 1; +} ma_data_converter; -/* -Free's an aligned malloc'd buffer. -*/ -void ma_aligned_free(void* p); +ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, ma_data_converter* pConverter); +void ma_data_converter_uninit(ma_data_converter* pConverter); +ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut); +ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut); +ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut); +ma_uint64 ma_data_converter_get_required_input_frame_count(ma_data_converter* pConverter, ma_uint64 outputFrameCount); +ma_uint64 ma_data_converter_get_expected_output_frame_count(ma_data_converter* pConverter, ma_uint64 inputFrameCount); +ma_uint64 ma_data_converter_get_input_latency(ma_data_converter* pConverter); +ma_uint64 ma_data_converter_get_output_latency(ma_data_converter* pConverter); -/* -Retrieves a friendly name for a format. -*/ -const char* ma_get_format_name(ma_format format); -/* -Blends two frames in floating point format. -*/ -void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels); -/* -Retrieves the size of a sample in bytes for the given format. -This API is efficient and is implemented using a lookup table. 
+/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* -Thread Safety: SAFE - This API is pure. -*/ -ma_uint32 ma_get_bytes_per_sample(ma_format format); -static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; } +DATA CONVERSION +=============== -/* -Converts a log level to a string. -*/ -const char* ma_log_level_to_string(ma_uint32 logLevel); +This section contains the APIs for data conversion. You will find everything here for channel mapping, sample format conversion, resampling, etc. +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ /************************************************************************************************************************************************************ Format Conversion +================= + +Dithering +--------- +Dithering can be set using ditherMode parmater. + +The different dithering modes include the following, in order of efficiency: + - None: ma_dither_mode_none + - Rectangle: ma_dither_mode_rectangle + - Triangle: ma_dither_mode_triangle + +Note that even if the dither mode is set to something other than ma_dither_mode_none, it will be ignored for conversions where dithering is not needed. +Dithering is available for the following conversions: + - s16 -> u8 + - s24 -> u8 + - s32 -> u8 + - f32 -> u8 + - s24 -> s16 + - s32 -> s16 + - f32 -> s16 + +Note that it is not an error to pass something other than ma_dither_mode_none for conversions where dither is not used. It will just be ignored. ************************************************************************************************************************************************************/ void ma_pcm_u8_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); @@ -1576,6 +1479,7 @@ void ma_pcm_f32_to_s16(void* pOut, const void* pIn, ma_uint64 count, ma_dither_m void ma_pcm_f32_to_s24(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); void ma_pcm_f32_to_s32(void* pOut, const void* pIn, ma_uint64 count, ma_dither_mode ditherMode); void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode); +void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode); /* Deinterleaves an interleaved buffer. @@ -1587,17 +1491,308 @@ Interleaves a group of deinterleaved buffers. 
*/ void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames); - /************************************************************************************************************************************************************ -************************************************************************************************************************************************************* -DEVICE I/O -========== +Channel Maps +============ -This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc. +Below is the channel map used by ma_standard_channel_map_default: -************************************************************************************************************************************************************* -************************************************************************************************************************************************************/ +|---------------|------------------------------| +| Channel Count | Mapping | +|---------------|------------------------------| +| 1 (Mono) | 0: MA_CHANNEL_MONO | +|---------------|------------------------------| +| 2 (Stereo) | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +|---------------|------------------------------| +| 3 | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +| | 2: MA_CHANNEL_FRONT_CENTER | +|---------------|------------------------------| +| 4 (Surround) | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +| | 2: MA_CHANNEL_FRONT_CENTER | +| | 3: MA_CHANNEL_BACK_CENTER | +|---------------|------------------------------| +| 5 | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +| | 2: MA_CHANNEL_FRONT_CENTER | +| | 3: MA_CHANNEL_BACK_LEFT | +| | 4: MA_CHANNEL_BACK_RIGHT | +|---------------|------------------------------| +| 6 (5.1) | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +| | 2: MA_CHANNEL_FRONT_CENTER | +| | 3: MA_CHANNEL_LFE | +| | 4: MA_CHANNEL_SIDE_LEFT | +| | 5: MA_CHANNEL_SIDE_RIGHT | +|---------------|------------------------------| +| 7 | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +| | 2: MA_CHANNEL_FRONT_CENTER | +| | 3: MA_CHANNEL_LFE | +| | 4: MA_CHANNEL_BACK_CENTER | +| | 4: MA_CHANNEL_SIDE_LEFT | +| | 5: MA_CHANNEL_SIDE_RIGHT | +|---------------|------------------------------| +| 8 (7.1) | 0: MA_CHANNEL_FRONT_LEFT | +| | 1: MA_CHANNEL_FRONT_RIGHT | +| | 2: MA_CHANNEL_FRONT_CENTER | +| | 3: MA_CHANNEL_LFE | +| | 4: MA_CHANNEL_BACK_LEFT | +| | 5: MA_CHANNEL_BACK_RIGHT | +| | 6: MA_CHANNEL_SIDE_LEFT | +| | 7: MA_CHANNEL_SIDE_RIGHT | +|---------------|------------------------------| +| Other | All channels set to 0. This | +| | is equivalent to the same | +| | mapping as the device. | +|---------------|------------------------------| + +************************************************************************************************************************************************************/ + +/* +Helper for retrieving a standard channel map. +*/ +void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]); + +/* +Copies a channel map. +*/ +void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels); + + +/* +Determines whether or not a channel map is valid. + +A blank channel map is valid (all channels set to MA_CHANNEL_NONE). 
The way a blank channel map is handled is context specific, but +is usually treated as a passthrough. + +Invalid channel maps: + - A channel map with no channels + - A channel map with more than one channel and a mono channel +*/ +ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]); + +/* +Helper for comparing two channel maps for equality. + +This assumes the channel count is the same between the two. +*/ +ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]); + +/* +Helper for determining if a channel map is blank (all channels set to MA_CHANNEL_NONE). +*/ +ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]); + +/* +Helper for determining whether or not a channel is present in the given channel map. +*/ +ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition); + + +/************************************************************************************************************************************************************ + +Conversion Helpers + +************************************************************************************************************************************************************/ + +/* +High-level helper for doing a full format conversion in one go. Returns the number of output frames. Call this with pOut set to NULL to +determine the required size of the output buffer. frameCountOut should be set to the capacity of pOut. If pOut is NULL, frameCountOut is +ignored. + +A return value of 0 indicates an error. + +This function is useful for one-off bulk conversions, but if you're streaming data you should use the ma_data_converter APIs instead. +*/ +ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn); +ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig); + + +/************************************************************************************************************************************************************ + +Ring Buffer +=========== + +Features +-------- +- Lock free (assuming single producer, single consumer) +- Support for interleaved and deinterleaved streams +- Allows the caller to allocate their own block of memory + +Usage +----- +- Call ma_rb_init() to initialize a simple buffer, with an optional pre-allocated buffer. If you pass in NULL + for the pre-allocated buffer, it will be allocated for you and free()'d in ma_rb_uninit(). If you pass in + your own pre-allocated buffer, free()-ing is left to you. + +- Call ma_rb_init_ex() if you need a deinterleaved buffer. The data for each sub-buffer is offset from each + other based on the stride. Use ma_rb_get_subbuffer_stride(), ma_rb_get_subbuffer_offset() and + ma_rb_get_subbuffer_ptr() to manage your sub-buffers. + +- Use ma_rb_acquire_read() and ma_rb_acquire_write() to retrieve a pointer to a section of the ring buffer. + You specify the number of bytes you need, and on output it will set to what was actually acquired. 
If the + read or write pointer is positioned such that the number of bytes requested will require a loop, it will be + clamped to the end of the buffer. Therefore, the number of bytes you're given may be less than the number + you requested. + +- After calling ma_rb_acquire_read/write(), you do your work on the buffer and then "commit" it with + ma_rb_commit_read/write(). This is where the read/write pointers are updated. When you commit you need to + pass in the buffer that was returned by the earlier call to ma_rb_acquire_read/write() and is only used + for validation. The number of bytes passed to ma_rb_commit_read/write() is what's used to increment the + pointers. + +- If you want to correct for drift between the write pointer and the read pointer you can use a combination + of ma_rb_pointer_distance(), ma_rb_seek_read() and ma_rb_seek_write(). Note that you can only move the + pointers forward, and you should only move the read pointer forward via the consumer thread, and the write + pointer forward by the producer thread. If there is too much space between the pointers, move the read + pointer forward. If there is too little space between the pointers, move the write pointer forward. + + +Notes +----- +- Thread safety depends on a single producer, single consumer model. Only one thread is allowed to write, and + only one thread is allowed to read. The producer is the only one allowed to move the write pointer, and the + consumer is the only one allowed to move the read pointer. +- Operates on bytes. Use ma_pcm_rb to operate in terms of PCM frames. +- Maximum buffer size in bytes is 0x7FFFFFFF-(MA_SIMD_ALIGNMENT-1) because of reasons. + + +PCM Ring Buffer +=============== +This is the same as the regular ring buffer, except that it works on PCM frames instead of bytes. + +************************************************************************************************************************************************************/ +typedef struct +{ + void* pBuffer; + ma_uint32 subbufferSizeInBytes; + ma_uint32 subbufferCount; + ma_uint32 subbufferStrideInBytes; + volatile ma_uint32 encodedReadOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */ + volatile ma_uint32 encodedWriteOffset; /* Most significant bit is the loop flag. Lower 31 bits contains the actual offset in bytes. */ + ma_bool32 ownsBuffer : 1; /* Used to know whether or not miniaudio is responsible for free()-ing the buffer. */ + ma_bool32 clearOnWriteAcquire : 1; /* When set, clears the acquired write buffer before returning from ma_rb_acquire_write(). 
*/ + ma_allocation_callbacks allocationCallbacks; +} ma_rb; + +ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); +ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB); +void ma_rb_uninit(ma_rb* pRB); +void ma_rb_reset(ma_rb* pRB); +ma_result ma_rb_acquire_read(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); +ma_result ma_rb_commit_read(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut); +ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferOut); +ma_result ma_rb_commit_write(ma_rb* pRB, size_t sizeInBytes, void* pBufferOut); +ma_result ma_rb_seek_read(ma_rb* pRB, size_t offsetInBytes); +ma_result ma_rb_seek_write(ma_rb* pRB, size_t offsetInBytes); +ma_int32 ma_rb_pointer_distance(ma_rb* pRB); /* Returns the distance between the write pointer and the read pointer. Should never be negative for a correct program. Will return the number of bytes that can be read before the read pointer hits the write pointer. */ +ma_uint32 ma_rb_available_read(ma_rb* pRB); +ma_uint32 ma_rb_available_write(ma_rb* pRB); +size_t ma_rb_get_subbuffer_size(ma_rb* pRB); +size_t ma_rb_get_subbuffer_stride(ma_rb* pRB); +size_t ma_rb_get_subbuffer_offset(ma_rb* pRB, size_t subbufferIndex); +void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer); + + +typedef struct +{ + ma_rb rb; + ma_format format; + ma_uint32 channels; +} ma_pcm_rb; + +ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB); +ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB); +void ma_pcm_rb_uninit(ma_pcm_rb* pRB); +void ma_pcm_rb_reset(ma_pcm_rb* pRB); +ma_result ma_pcm_rb_acquire_read(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); +ma_result ma_pcm_rb_commit_read(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut); +ma_result ma_pcm_rb_acquire_write(ma_pcm_rb* pRB, ma_uint32* pSizeInFrames, void** ppBufferOut); +ma_result ma_pcm_rb_commit_write(ma_pcm_rb* pRB, ma_uint32 sizeInFrames, void* pBufferOut); +ma_result ma_pcm_rb_seek_read(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); +ma_result ma_pcm_rb_seek_write(ma_pcm_rb* pRB, ma_uint32 offsetInFrames); +ma_int32 ma_pcm_rb_pointer_disance(ma_pcm_rb* pRB); /* Return value is in frames. 
*/ +ma_uint32 ma_pcm_rb_available_read(ma_pcm_rb* pRB); +ma_uint32 ma_pcm_rb_available_write(ma_pcm_rb* pRB); +ma_uint32 ma_pcm_rb_get_subbuffer_size(ma_pcm_rb* pRB); +ma_uint32 ma_pcm_rb_get_subbuffer_stride(ma_pcm_rb* pRB); +ma_uint32 ma_pcm_rb_get_subbuffer_offset(ma_pcm_rb* pRB, ma_uint32 subbufferIndex); +void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void* pBuffer); + + +/************************************************************************************************************************************************************ + +Miscellaneous Helpers + +************************************************************************************************************************************************************/ + +/* +malloc(). Calls MA_MALLOC(). +*/ +void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +realloc(). Calls MA_REALLOC(). +*/ +void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +free(). Calls MA_FREE(). +*/ +void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +Performs an aligned malloc, with the assumption that the alignment is a power of 2. +*/ +void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +Free's an aligned malloc'd buffer. +*/ +void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks); + +/* +Retrieves a friendly name for a format. +*/ +const char* ma_get_format_name(ma_format format); + +/* +Blends two frames in floating point format. +*/ +void ma_blend_f32(float* pOut, float* pInA, float* pInB, float factor, ma_uint32 channels); + +/* +Retrieves the size of a sample in bytes for the given format. + +This API is efficient and is implemented using a lookup table. + +Thread Safety: SAFE + This API is pure. +*/ +ma_uint32 ma_get_bytes_per_sample(ma_format format); +static MA_INLINE ma_uint32 ma_get_bytes_per_frame(ma_format format, ma_uint32 channels) { return ma_get_bytes_per_sample(format) * channels; } + +/* +Converts a log level to a string. +*/ +const char* ma_log_level_to_string(ma_uint32 logLevel); + + + +/************************************************************************************************************************************************************ +************************************************************************************************************************************************************* + +DEVICE I/O +========== + +This section contains the APIs for device playback and capture. Here is where you'll find ma_device_init(), etc. + +************************************************************************************************************************************************************* +************************************************************************************************************************************************************/ #ifndef MA_NO_DEVICE_IO /* Some backends are only supported on certain platforms. */ #if defined(MA_WIN32) @@ -1798,6 +1993,28 @@ typedef struct }; } ma_event; +typedef struct +{ + ma_context* pContext; + + union + { +#ifdef MA_WIN32 + struct + { + /*HANDLE*/ ma_handle hSemaphore; + } win32; +#endif +#ifdef MA_POSIX + struct + { + sem_t semaphore; + } posix; +#endif + int _unused; + }; +} ma_semaphore; + /* The callback for processing audio data from the device. 
@@ -1856,50 +2073,47 @@ typedef enum ma_share_mode_exclusive } ma_share_mode; +/* iOS/tvOS/watchOS session categories. */ +typedef enum +{ + ma_ios_session_category_default = 0, /* AVAudioSessionCategoryPlayAndRecord with AVAudioSessionCategoryOptionDefaultToSpeaker. */ + ma_ios_session_category_none, /* Leave the session category unchanged. */ + ma_ios_session_category_ambient, /* AVAudioSessionCategoryAmbient */ + ma_ios_session_category_solo_ambient, /* AVAudioSessionCategorySoloAmbient */ + ma_ios_session_category_playback, /* AVAudioSessionCategoryPlayback */ + ma_ios_session_category_record, /* AVAudioSessionCategoryRecord */ + ma_ios_session_category_play_and_record, /* AVAudioSessionCategoryPlayAndRecord */ + ma_ios_session_category_multi_route /* AVAudioSessionCategoryMultiRoute */ +} ma_ios_session_category; + +/* iOS/tvOS/watchOS session category options */ +typedef enum +{ + ma_ios_session_category_option_mix_with_others = 0x01, /* AVAudioSessionCategoryOptionMixWithOthers */ + ma_ios_session_category_option_duck_others = 0x02, /* AVAudioSessionCategoryOptionDuckOthers */ + ma_ios_session_category_option_allow_bluetooth = 0x04, /* AVAudioSessionCategoryOptionAllowBluetooth */ + ma_ios_session_category_option_default_to_speaker = 0x08, /* AVAudioSessionCategoryOptionDefaultToSpeaker */ + ma_ios_session_category_option_interrupt_spoken_audio_and_mix_with_others = 0x11, /* AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers */ + ma_ios_session_category_option_allow_bluetooth_a2dp = 0x20, /* AVAudioSessionCategoryOptionAllowBluetoothA2DP */ + ma_ios_session_category_option_allow_air_play = 0x40, /* AVAudioSessionCategoryOptionAllowAirPlay */ +} ma_ios_session_category_option; + typedef union { -#ifdef MA_SUPPORT_WASAPI wchar_t wasapi[64]; /* WASAPI uses a wchar_t string for identification. */ -#endif -#ifdef MA_SUPPORT_DSOUND ma_uint8 dsound[16]; /* DirectSound uses a GUID for identification. */ -#endif -#ifdef MA_SUPPORT_WINMM /*UINT_PTR*/ ma_uint32 winmm; /* When creating a device, WinMM expects a Win32 UINT_PTR for device identification. In practice it's actually just a UINT. */ -#endif -#ifdef MA_SUPPORT_ALSA char alsa[256]; /* ALSA uses a name string for identification. */ -#endif -#ifdef MA_SUPPORT_PULSEAUDIO char pulse[256]; /* PulseAudio uses a name string for identification. */ -#endif -#ifdef MA_SUPPORT_JACK int jack; /* JACK always uses default devices. */ -#endif -#ifdef MA_SUPPORT_COREAUDIO char coreaudio[256]; /* Core Audio uses a string for identification. */ -#endif -#ifdef MA_SUPPORT_SNDIO char sndio[256]; /* "snd/0", etc. */ -#endif -#ifdef MA_SUPPORT_AUDIO4 char audio4[256]; /* "/dev/audio", etc. */ -#endif -#ifdef MA_SUPPORT_OSS char oss[64]; /* "dev/dsp0", etc. "dev/dsp" for the default device. */ -#endif -#ifdef MA_SUPPORT_AAUDIO - ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. */ -#endif -#ifdef MA_SUPPORT_OPENSL + ma_int32 aaudio; /* AAudio uses a 32-bit integer for identification. */ ma_uint32 opensl; /* OpenSL|ES uses a 32-bit unsigned integer for identification. */ -#endif -#ifdef MA_SUPPORT_WEBAUDIO char webaudio[32]; /* Web Audio always uses default devices for now, but if this changes it'll be a GUID. */ -#endif -#ifdef MA_SUPPORT_NULL int nullbackend; /* The null backend uses an integer for device IDs. 
*/ -#endif } ma_device_id; typedef struct @@ -1943,6 +2157,20 @@ typedef struct ma_device_callback_proc dataCallback; ma_stop_proc stopCallback; void* pUserData; + + struct + { + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfCount; + } linear; + struct + { + int quality; + } speex; + } resampling; + struct { ma_device_id* pDeviceID; @@ -1964,6 +2192,8 @@ typedef struct { ma_bool32 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ ma_bool32 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool32 noAutoStreamRouting; /* Disables automatic stream routing. */ + ma_bool32 noHardwareOffloading; /* Disables WASAPI's hardware offloading feature. */ } wasapi; struct { @@ -1981,7 +2211,7 @@ typedef struct ma_log_proc logCallback; ma_thread_priority threadPriority; void* pUserData; - + ma_allocation_callbacks allocationCallbacks; struct { ma_bool32 useVerboseDeviceEnumeration; @@ -1993,6 +2223,11 @@ typedef struct ma_bool32 tryAutoSpawn; /* Enables autospawning of the PulseAudio daemon if necessary. */ } pulse; struct + { + ma_ios_session_category sessionCategory; + ma_uint32 sessionCategoryOptions; + } coreaudio; + struct { const char* pClientName; ma_bool32 tryStartServer; @@ -2007,6 +2242,7 @@ struct ma_context ma_log_proc logCallback; ma_thread_priority threadPriority; void* pUserData; + ma_allocation_callbacks allocationCallbacks; ma_mutex deviceEnumLock; /* Used to make ma_context_get_devices() thread safe. */ ma_mutex deviceInfoLock; /* Used to make ma_context_get_device_info() thread safe. */ ma_uint32 deviceInfoCapacity; /* Total capacity of pDeviceInfos. */ @@ -2290,6 +2526,7 @@ struct ma_context ma_proc AAudioStreamBuilder_setBufferCapacityInFrames; ma_proc AAudioStreamBuilder_setFramesPerDataCallback; ma_proc AAudioStreamBuilder_setDataCallback; + ma_proc AAudioStreamBuilder_setErrorCallback; ma_proc AAudioStreamBuilder_setPerformanceMode; ma_proc AAudioStreamBuilder_openStream; ma_proc AAudioStream_close; @@ -2373,12 +2610,12 @@ struct ma_context }; }; -MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device +struct ma_device { ma_context* pContext; ma_device_type type; ma_uint32 sampleRate; - ma_uint32 state; + volatile ma_uint32 state; /* The state of the device is variable and can change at any time on any thread, so tell the compiler as such with `volatile`. */ ma_device_callback_proc onData; ma_stop_proc onStop; void* pUserData; /* Application defined data. */ @@ -2396,6 +2633,18 @@ MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device ma_bool32 noClip : 1; float masterVolumeFactor; struct + { + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfCount; + } linear; + struct + { + int quality; + } speex; + } resampling; + struct { char name[256]; /* Maybe temporary. Likely to be replaced with a query API. */ ma_share_mode shareMode; /* Set to whatever was passed in when the device was initialized. */ @@ -2411,9 +2660,7 @@ MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device ma_channel internalChannelMap[MA_MAX_CHANNELS]; ma_uint32 internalBufferSizeInFrames; ma_uint32 internalPeriods; - ma_pcm_converter converter; - ma_uint32 _dspFrameCount; /* Internal use only. Used as the data source when reading from the device. 
*/ - const ma_uint8* _dspFrames; /* ^^^ AS ABOVE ^^^ */ + ma_data_converter converter; } playback; struct { @@ -2431,9 +2678,7 @@ MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device ma_channel internalChannelMap[MA_MAX_CHANNELS]; ma_uint32 internalBufferSizeInFrames; ma_uint32 internalPeriods; - ma_pcm_converter converter; - ma_uint32 _dspFrameCount; /* Internal use only. Used as the data source when reading from the device. */ - const ma_uint8* _dspFrames; /* ^^^ AS ABOVE ^^^ */ + ma_data_converter converter; } capture; union @@ -2458,10 +2703,13 @@ MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device ma_bool32 hasDefaultCaptureDeviceChanged; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ ma_uint32 periodSizeInFramesPlayback; ma_uint32 periodSizeInFramesCapture; - ma_bool32 isStartedCapture; - ma_bool32 isStartedPlayback; - ma_bool32 noAutoConvertSRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ - ma_bool32 noDefaultQualitySRC; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool32 isStartedCapture; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ + ma_bool32 isStartedPlayback; /* <-- Make sure this is always a whole 32-bits because we use atomic assignments. */ + ma_bool32 noAutoConvertSRC : 1; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM. */ + ma_bool32 noDefaultQualitySRC : 1; /* When set to true, disables the use of AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY. */ + ma_bool32 noHardwareOffloading : 1; + ma_bool32 allowCaptureAutoStreamRouting : 1; + ma_bool32 allowPlaybackAutoStreamRouting : 1; } wasapi; #endif #ifdef MA_SUPPORT_DSOUND @@ -2549,6 +2797,7 @@ MA_ALIGNED_STRUCT(MA_SIMD_ALIGNMENT) ma_device ma_bool32 isSwitchingPlaybackDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ ma_bool32 isSwitchingCaptureDevice; /* <-- Set to true when the default device has changed and miniaudio is in the process of switching. */ ma_pcm_rb duplexRB; + void* pRouteChangeHandler; /* Only used on mobile platforms. Obj-C object for handling route changes. */ } coreaudio; #endif #ifdef MA_SUPPORT_SNDIO @@ -3156,6 +3405,10 @@ float ma_gain_db_to_factor(float gain); /************************************************************************************************************************************************************ Decoding +======== + +Decoders are independent of the main device API. Decoding APIs can be called freely inside the device's data callback, but they are not thread safe unless +you do your own synchronization. ************************************************************************************************************************************************************/ #ifndef MA_NO_DECODING @@ -3168,8 +3421,9 @@ typedef enum ma_seek_origin_current } ma_seek_origin; -typedef size_t (* ma_decoder_read_proc) (ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead); /* Returns the number of bytes read. */ +typedef size_t (* ma_decoder_read_proc) (ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead); /* Returns the number of bytes read. */ typedef ma_bool32 (* ma_decoder_seek_proc) (ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin); +typedef ma_uint64 (* ma_decoder_read_pcm_frames_proc) (ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount); /* Returns the number of frames read. Output data is in internal format. 
*/ typedef ma_result (* ma_decoder_seek_to_pcm_frame_proc) (ma_decoder* pDecoder, ma_uint64 frameIndex); typedef ma_result (* ma_decoder_uninit_proc) (ma_decoder* pDecoder); typedef ma_uint64 (* ma_decoder_get_length_in_pcm_frames_proc)(ma_decoder* pDecoder); @@ -3182,11 +3436,19 @@ typedef struct ma_channel channelMap[MA_MAX_CHANNELS]; ma_channel_mix_mode channelMixMode; ma_dither_mode ditherMode; - ma_src_algorithm srcAlgorithm; - union + struct { - ma_src_config_sinc sinc; - } src; + ma_resample_algorithm algorithm; + struct + { + ma_uint32 lpfCount; + } linear; + struct + { + int quality; + } speex; + } resampling; + ma_allocation_callbacks allocationCallbacks; } ma_decoder_config; struct ma_decoder @@ -3203,7 +3465,9 @@ struct ma_decoder ma_uint32 outputChannels; ma_uint32 outputSampleRate; ma_channel outputChannelMap[MA_MAX_CHANNELS]; - ma_pcm_converter dsp; /* <-- Format conversion is achieved by running frames through this. */ + ma_data_converter converter; /* <-- Data conversion is achieved by running frames through this. */ + ma_allocation_callbacks allocationCallbacks; + ma_decoder_read_pcm_frames_proc onReadPCMFrames; ma_decoder_seek_to_pcm_frame_proc onSeekToPCMFrame; ma_decoder_uninit_proc onUninit; ma_decoder_get_length_in_pcm_frames_proc onGetLengthInPCMFrames; @@ -3258,11 +3522,24 @@ If the length is unknown or an error occurs, 0 will be returned. This will always return 0 for Vorbis decoders. This is due to a limitation with stb_vorbis in push mode which is what miniaudio uses internally. -This will run in linear time for MP3 decoders. Do not call this in time critical scenarios. +For MP3's, this will decode the entire file. Do not call this in time critical scenarios. + +This function is not thread safe without your own synchronization. */ ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder); +/* +Reads PCM frames from the given decoder. + +This is not thread safe without your own synchronization. +*/ ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount); + +/* +Seeks to a PCM frame based on it's absolute index. + +This is not thread safe without your own synchronization. +*/ ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex); /* @@ -3291,8 +3568,7 @@ typedef struct } ma_sine_wave; ma_result ma_sine_wave_init(double amplitude, double period, ma_uint32 sampleRate, ma_sine_wave* pSineWave); -ma_uint64 ma_sine_wave_read_f32(ma_sine_wave* pSineWave, ma_uint64 count, float* pSamples); -ma_uint64 ma_sine_wave_read_f32_ex(ma_sine_wave* pSineWave, ma_uint64 frameCount, ma_uint32 channels, ma_stream_layout layout, float** ppFrames); +ma_uint64 ma_sine_wave_read_pcm_frames(ma_sine_wave* pSineWave, void* pFramesOut, ma_uint64 frameCount, ma_format format, ma_uint32 channels); #ifdef __cplusplus } @@ -3313,86 +3589,19 @@ IMPLEMENTATION #include /* For INT_MAX */ #include /* sin(), etc. */ -#if defined(MA_DEBUG_OUTPUT) -#include /* for printf() for debug output */ +#if !defined(MA_NO_STDIO) || defined(MA_DEBUG_OUTPUT) + #include + #if !defined(_MSC_VER) && !defined(__DMC__) + #include /* For strcasecmp(). */ + #include /* For wcslen(), wcsrtombs() */ + #endif #endif #ifdef MA_WIN32 -// @raysan5: To avoid conflicting windows.h symbols with raylib, so flags are defined -// WARNING: Those flags avoid inclusion of some Win32 headers that could be required -// by user at some point and won't be included... 
-//------------------------------------------------------------------------------------- - -// If defined, the following flags inhibit definition of the indicated items. -#define NOGDICAPMASKS // CC_*, LC_*, PC_*, CP_*, TC_*, RC_ -#define NOVIRTUALKEYCODES // VK_* -#define NOWINMESSAGES // WM_*, EM_*, LB_*, CB_* -#define NOWINSTYLES // WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_* -#define NOSYSMETRICS // SM_* -#define NOMENUS // MF_* -#define NOICONS // IDI_* -#define NOKEYSTATES // MK_* -#define NOSYSCOMMANDS // SC_* -#define NORASTEROPS // Binary and Tertiary raster ops -#define NOSHOWWINDOW // SW_* -#define OEMRESOURCE // OEM Resource values -#define NOATOM // Atom Manager routines -#define NOCLIPBOARD // Clipboard routines -#define NOCOLOR // Screen colors -#define NOCTLMGR // Control and Dialog routines -#define NODRAWTEXT // DrawText() and DT_* -#define NOGDI // All GDI defines and routines -#define NOKERNEL // All KERNEL defines and routines -#define NOUSER // All USER defines and routines -//#define NONLS // All NLS defines and routines -#define NOMB // MB_* and MessageBox() -#define NOMEMMGR // GMEM_*, LMEM_*, GHND, LHND, associated routines -#define NOMETAFILE // typedef METAFILEPICT -#define NOMINMAX // Macros min(a,b) and max(a,b) -#define NOMSG // typedef MSG and associated routines -#define NOOPENFILE // OpenFile(), OemToAnsi, AnsiToOem, and OF_* -#define NOSCROLL // SB_* and scrolling routines -#define NOSERVICE // All Service Controller routines, SERVICE_ equates, etc. -#define NOSOUND // Sound driver routines -#define NOTEXTMETRIC // typedef TEXTMETRIC and associated routines -#define NOWH // SetWindowsHook and WH_* -#define NOWINOFFSETS // GWL_*, GCL_*, associated routines -#define NOCOMM // COMM driver routines -#define NOKANJI // Kanji support stuff. -#define NOHELP // Help engine interface. -#define NOPROFILER // Profiler interface. -#define NODEFERWINDOWPOS // DeferWindowPos routines -#define NOMCX // Modem Configuration Extensions - -// Type required before windows.h inclusion -typedef struct tagMSG *LPMSG; - #include - -// Type required by some unused function... -typedef struct tagBITMAPINFOHEADER { - DWORD biSize; - LONG biWidth; - LONG biHeight; - WORD biPlanes; - WORD biBitCount; - DWORD biCompression; - DWORD biSizeImage; - LONG biXPelsPerMeter; - LONG biYPelsPerMeter; - DWORD biClrUsed; - DWORD biClrImportant; -} BITMAPINFOHEADER, *PBITMAPINFOHEADER; - #include #include #include - -// @raysan5: Some required types defined for MSVC/TinyC compiler -#if defined(_MSC_VER) || defined(__TINYC__) - #include "propidl.h" -#endif -//---------------------------------------------------------------------------------- #else #include /* For malloc(), free(), wcstombs(). 
*/ #include /* For memset() */ @@ -3747,6 +3956,23 @@ static MA_INLINE ma_bool32 ma_has_neon() #endif } +#define MA_SIMD_NONE 0 +#define MA_SIMD_SSE2 1 +#define MA_SIMD_AVX2 2 +#define MA_SIMD_NEON 3 + +#ifndef MA_PREFERRED_SIMD + # if defined(MA_SUPPORT_SSE2) && defined(MA_PREFER_SSE2) + #define MA_PREFERRED_SIMD MA_SIMD_SSE2 + #elif defined(MA_SUPPORT_AVX2) && defined(MA_PREFER_AVX2) + #define MA_PREFERRED_SIMD MA_SIMD_AVX2 + #elif defined(MA_SUPPORT_NEON) && defined(MA_PREFER_NEON) + #define MA_PREFERRED_SIMD MA_SIMD_NEON + #else + #define MA_PREFERRED_SIMD MA_SIMD_NONE + #endif +#endif + static MA_INLINE ma_bool32 ma_is_little_endian() { @@ -3765,7 +3991,7 @@ static MA_INLINE ma_bool32 ma_is_big_endian() #ifndef MA_COINIT_VALUE -#define MA_COINIT_VALUE 0 /* 0 = COINIT_MULTITHREADED*/ +#define MA_COINIT_VALUE 0 /* 0 = COINIT_MULTITHREADED */ #endif @@ -3904,16 +4130,17 @@ Standard Library Stuff #endif #endif -#define ma_zero_memory MA_ZERO_MEMORY -#define ma_copy_memory MA_COPY_MEMORY -#define ma_assert MA_ASSERT +#define MA_ZERO_OBJECT(p) MA_ZERO_MEMORY((p), sizeof(*(p))) -#define ma_zero_object(p) ma_zero_memory((p), sizeof(*(p))) -#define ma_countof(x) (sizeof(x) / sizeof(x[0])) -#define ma_max(x, y) (((x) > (y)) ? (x) : (y)) -#define ma_min(x, y) (((x) < (y)) ? (x) : (y)) -#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi))) -#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) +#define ma_countof(x) (sizeof(x) / sizeof(x[0])) +#define ma_max(x, y) (((x) > (y)) ? (x) : (y)) +#define ma_min(x, y) (((x) < (y)) ? (x) : (y)) +#define ma_clamp(x, lo, hi) (ma_max(lo, ma_min(x, hi))) +#define ma_offset_ptr(p, offset) (((ma_uint8*)(p)) + (offset)) + +#define ma_floorf(x) ((float)floor((double)(x))) +#define ma_sinf(x) ((float)sin((double)(x))) +#define ma_cosf(x) ((float)cos((double)(x))) #define ma_buffer_frame_capacity(buffer, channels, format) (sizeof(buffer) / ma_get_bytes_per_sample(format) / (channels)) @@ -4184,10 +4411,10 @@ int ma_strappend(char* dst, size_t dstSize, const char* srcA, const char* srcB) return result; } -char* ma_copy_string(const char* src) +char* ma_copy_string(const char* src, const ma_allocation_callbacks* pAllocationCallbacks) { size_t sz = strlen(src)+1; - char* dst = (char*)ma_malloc(sz); + char* dst = (char*)ma_malloc(sz, pAllocationCallbacks); if (dst == NULL) { return NULL; } @@ -4198,6 +4425,46 @@ char* ma_copy_string(const char* src) } +static MA_INLINE void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes) +{ +#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX + MA_COPY_MEMORY(dst, src, (size_t)sizeInBytes); +#else + while (sizeInBytes > 0) { + ma_uint64 bytesToCopyNow = sizeInBytes; + if (bytesToCopyNow > MA_SIZE_MAX) { + bytesToCopyNow = MA_SIZE_MAX; + } + + MA_COPY_MEMORY(dst, src, (size_t)bytesToCopyNow); /* Safe cast to size_t. */ + + sizeInBytes -= bytesToCopyNow; + dst = ( void*)(( ma_uint8*)dst + bytesToCopyNow); + src = (const void*)((const ma_uint8*)src + bytesToCopyNow); + } +#endif +} + +static MA_INLINE void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes) +{ +#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX + MA_ZERO_MEMORY(dst, (size_t)sizeInBytes); +#else + while (sizeInBytes > 0) { + ma_uint64 bytesToZeroNow = sizeInBytes; + if (bytesToZeroNow > MA_SIZE_MAX) { + bytesToZeroNow = MA_SIZE_MAX; + } + + MA_ZERO_MEMORY(dst, (size_t)bytesToZeroNow); /* Safe cast to size_t. 
*/ + + sizeInBytes -= bytesToZeroNow; + dst = (void*)((ma_uint8*)dst + bytesToZeroNow); + } +#endif +} + + /* Thanks to good old Bit Twiddling Hacks for this one: http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 */ static MA_INLINE unsigned int ma_next_power_of_2(unsigned int x) { @@ -4264,6 +4531,7 @@ static MA_INLINE float ma_mix_f32_fast(float x, float y, float a) /*return x + (y - x)*a;*/ } + #if defined(MA_SUPPORT_SSE2) static MA_INLINE __m128 ma_mix_f32_fast__sse2(__m128 x, __m128 y, __m128 a) { @@ -4305,6 +4573,25 @@ static MA_INLINE float ma_scale_to_range_f32(float x, float lo, float hi) } +/* +Greatest common factor using Euclid's algorithm iteratively. +*/ +static MA_INLINE ma_uint32 ma_gcf_u32(ma_uint32 a, ma_uint32 b) +{ + for (;;) { + if (b == 0) { + break; + } else { + ma_uint32 t = a; + a = b; + b = t % a; + } + } + + return a; +} + + /* Random Number Generation @@ -4319,12 +4606,12 @@ for miniaudio's purposes. #define MA_LCG_C 0 static ma_int32 g_maLCG = 4321; /* Non-zero initial seed. Use ma_seed() to use an explicit seed. */ -void ma_seed(ma_int32 seed) +static MA_INLINE void ma_seed(ma_int32 seed) { g_maLCG = seed; } -ma_int32 ma_rand_s32() +static MA_INLINE ma_int32 ma_rand_s32() { ma_int32 lcg = g_maLCG; ma_int32 r = (MA_LCG_A * lcg + MA_LCG_C) % MA_LCG_M; @@ -4332,27 +4619,27 @@ ma_int32 ma_rand_s32() return r; } -ma_uint32 ma_rand_u32() +static MA_INLINE ma_uint32 ma_rand_u32() { return (ma_uint32)ma_rand_s32(); } -double ma_rand_f64() +static MA_INLINE double ma_rand_f64() { return ma_rand_s32() / (double)0x7FFFFFFF; } -float ma_rand_f32() +static MA_INLINE float ma_rand_f32() { return (float)ma_rand_f64(); } -float ma_rand_range_f32(float lo, float hi) +static MA_INLINE float ma_rand_range_f32(float lo, float hi) { return ma_scale_to_range_f32(ma_rand_f32(), lo, hi); } -ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi) +static MA_INLINE ma_int32 ma_rand_range_s32(ma_int32 lo, ma_int32 hi) { if (lo == hi) { return lo; @@ -4402,52 +4689,6 @@ static MA_INLINE ma_int32 ma_dither_s32(ma_dither_mode ditherMode, ma_int32 dith } -/* -Splits a buffer into parts of equal length and of the given alignment. The returned size of the split buffers will be a -multiple of the alignment. The alignment must be a power of 2. 
-*/ -void ma_split_buffer(void* pBuffer, size_t bufferSize, size_t splitCount, size_t alignment, void** ppBuffersOut, size_t* pSplitSizeOut) -{ - ma_uintptr pBufferUnaligned; - ma_uintptr pBufferAligned; - size_t unalignedBytes; - size_t splitSize; - - if (pSplitSizeOut) { - *pSplitSizeOut = 0; - } - - if (pBuffer == NULL || bufferSize == 0 || splitCount == 0) { - return; - } - - if (alignment == 0) { - alignment = 1; - } - - pBufferUnaligned = (ma_uintptr)pBuffer; - pBufferAligned = (pBufferUnaligned + (alignment-1)) & ~(alignment-1); - unalignedBytes = (size_t)(pBufferAligned - pBufferUnaligned); - - splitSize = 0; - if (bufferSize >= unalignedBytes) { - splitSize = (bufferSize - unalignedBytes) / splitCount; - splitSize = splitSize & ~(alignment-1); - } - - if (ppBuffersOut != NULL) { - size_t i; - for (i = 0; i < splitCount; ++i) { - ppBuffersOut[i] = (ma_uint8*)(pBufferAligned + (splitSize*i)); - } - } - - if (pSplitSizeOut) { - *pSplitSizeOut = splitSize; - } -} - - /****************************************************************************** Atomics @@ -4495,18 +4736,129 @@ Atomics #endif -ma_uint32 ma_get_standard_sample_rate_priority_index(ma_uint32 sampleRate) /* Lower = higher priority */ +static void* ma__malloc_default(size_t sz, void* pUserData) { - ma_uint32 i; - for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) { - if (g_maStandardSampleRatePriorities[i] == sampleRate) { - return i; + (void)pUserData; + return MA_MALLOC(sz); +} + +static void* ma__realloc_default(void* p, size_t sz, void* pUserData) +{ + (void)pUserData; + return MA_REALLOC(p, sz); +} + +static void ma__free_default(void* p, void* pUserData) +{ + (void)pUserData; + MA_FREE(p); +} + + +static void* ma__malloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + + if (pAllocationCallbacks->onMalloc != NULL) { + return pAllocationCallbacks->onMalloc(sz, pAllocationCallbacks->pUserData); + } + + /* Try using realloc(). */ + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(NULL, sz, pAllocationCallbacks->pUserData); + } + + return NULL; +} + +static void* ma__realloc_from_callbacks(void* p, size_t szNew, size_t szOld, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (pAllocationCallbacks == NULL) { + return NULL; + } + + if (pAllocationCallbacks->onRealloc != NULL) { + return pAllocationCallbacks->onRealloc(p, szNew, pAllocationCallbacks->pUserData); + } + + /* Try emulating realloc() in terms of malloc()/free(). 
*/ + if (pAllocationCallbacks->onMalloc != NULL && pAllocationCallbacks->onFree != NULL) { + void* p2; + + p2 = pAllocationCallbacks->onMalloc(szNew, pAllocationCallbacks->pUserData); + if (p2 == NULL) { + return NULL; + } + + if (p != NULL) { + MA_COPY_MEMORY(p2, p, szOld); + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); } + + return p2; } - return (ma_uint32)-1; + return NULL; +} + +static MA_INLINE void* ma__calloc_from_callbacks(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) +{ + void* p = ma__malloc_from_callbacks(sz, pAllocationCallbacks); + if (p != NULL) { + MA_ZERO_MEMORY(p, sz); + } + + return p; +} + +static void ma__free_from_callbacks(void* p, const ma_allocation_callbacks* pAllocationCallbacks) +{ + if (p == NULL || pAllocationCallbacks == NULL) { + return; + } + + if (pAllocationCallbacks->onFree != NULL) { + pAllocationCallbacks->onFree(p, pAllocationCallbacks->pUserData); + } +} + +static ma_allocation_callbacks ma_allocation_callbacks_init_default() +{ + ma_allocation_callbacks callbacks; + callbacks.pUserData = NULL; + callbacks.onMalloc = ma__malloc_default; + callbacks.onRealloc = ma__realloc_default; + callbacks.onFree = ma__free_default; + + return callbacks; +} + +static ma_result ma_allocation_callbacks_init_copy(ma_allocation_callbacks* pDst, const ma_allocation_callbacks* pSrc) +{ + if (pDst == NULL) { + return MA_INVALID_ARGS; + } + + if (pSrc == NULL) { + *pDst = ma_allocation_callbacks_init_default(); + } else { + if (pSrc->pUserData == NULL && pSrc->onFree == NULL && pSrc->onMalloc == NULL && pSrc->onRealloc == NULL) { + *pDst = ma_allocation_callbacks_init_default(); + } else { + if (pSrc->onFree == NULL || (pSrc->onMalloc == NULL && pSrc->onRealloc == NULL)) { + return MA_INVALID_ARGS; /* Invalid allocation callbacks. */ + } else { + *pDst = *pSrc; + } + } + } + + return MA_SUCCESS; } + ma_uint64 ma_calculate_frame_count_after_src(ma_uint32 sampleRateOut, ma_uint32 sampleRateIn, ma_uint64 frameCountIn) { double srcRatio = (double)sampleRateOut / sampleRateIn; @@ -4521,6 +4873,9 @@ ma_uint64 ma_calculate_frame_count_after_src(ma_uint32 sampleRateOut, ma_uint32 return frameCountOut; } +#ifndef MA_DATA_CONVERTER_STACK_BUFFER_SIZE +#define MA_DATA_CONVERTER_STACK_BUFFER_SIZE 4096 +#endif /************************************************************************************************************************************************************ ************************************************************************************************************************************************************* @@ -4711,7 +5066,7 @@ const char* ma_log_level_to_string(ma_uint32 logLevel) } /* Posts a log message. */ -void ma_log(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message) +static void ma_log(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message) { if (pContext == NULL) { return; @@ -4736,7 +5091,7 @@ void ma_log(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const } /* Posts an log message. Throw a breakpoint in here if you're needing to debug. The return value is always "resultCode". */ -ma_result ma_context_post_error(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode) +static ma_result ma_context_post_error(ma_context* pContext, ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode) { /* Derive the context from the device if necessary. 
*/ if (pContext == NULL) { @@ -4749,7 +5104,7 @@ ma_result ma_context_post_error(ma_context* pContext, ma_device* pDevice, ma_uin return resultCode; } -ma_result ma_post_error(ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode) +static ma_result ma_post_error(ma_device* pDevice, ma_uint32 logLevel, const char* message, ma_result resultCode) { return ma_context_post_error(NULL, pDevice, logLevel, message, resultCode); } @@ -4762,7 +5117,7 @@ Timing *******************************************************************************/ #ifdef MA_WIN32 LARGE_INTEGER g_ma_TimerFrequency = {{0}}; -void ma_timer_init(ma_timer* pTimer) +static void ma_timer_init(ma_timer* pTimer) { LARGE_INTEGER counter; @@ -4774,7 +5129,7 @@ void ma_timer_init(ma_timer* pTimer) pTimer->counter = counter.QuadPart; } -double ma_timer_get_time_in_seconds(ma_timer* pTimer) +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) { LARGE_INTEGER counter; if (!QueryPerformanceCounter(&counter)) { @@ -4785,7 +5140,7 @@ double ma_timer_get_time_in_seconds(ma_timer* pTimer) } #elif defined(MA_APPLE) && (__MAC_OS_X_VERSION_MIN_REQUIRED < 101200) ma_uint64 g_ma_TimerFrequency = 0; -void ma_timer_init(ma_timer* pTimer) +static void ma_timer_init(ma_timer* pTimer) { mach_timebase_info_data_t baseTime; mach_timebase_info(&baseTime); @@ -4794,7 +5149,7 @@ void ma_timer_init(ma_timer* pTimer) pTimer->counter = mach_absolute_time(); } -double ma_timer_get_time_in_seconds(ma_timer* pTimer) +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) { ma_uint64 newTimeCounter = mach_absolute_time(); ma_uint64 oldTimeCounter = pTimer->counter; @@ -4802,12 +5157,12 @@ double ma_timer_get_time_in_seconds(ma_timer* pTimer) return (newTimeCounter - oldTimeCounter) / g_ma_TimerFrequency; } #elif defined(MA_EMSCRIPTEN) -void ma_timer_init(ma_timer* pTimer) +static void ma_timer_init(ma_timer* pTimer) { pTimer->counterD = emscripten_get_now(); } -double ma_timer_get_time_in_seconds(ma_timer* pTimer) +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) { return (emscripten_get_now() - pTimer->counterD) / 1000; /* Emscripten is in milliseconds. 
*/ } @@ -4819,7 +5174,7 @@ double ma_timer_get_time_in_seconds(ma_timer* pTimer) #define MA_CLOCK_ID CLOCK_REALTIME #endif -void ma_timer_init(ma_timer* pTimer) +static void ma_timer_init(ma_timer* pTimer) { struct timespec newTime; clock_gettime(MA_CLOCK_ID, &newTime); @@ -4827,7 +5182,7 @@ void ma_timer_init(ma_timer* pTimer) pTimer->counter = (newTime.tv_sec * 1000000000) + newTime.tv_nsec; } -double ma_timer_get_time_in_seconds(ma_timer* pTimer) +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) { ma_uint64 newTimeCounter; ma_uint64 oldTimeCounter; @@ -4841,7 +5196,7 @@ double ma_timer_get_time_in_seconds(ma_timer* pTimer) return (newTimeCounter - oldTimeCounter) / 1000000000.0; } #else -void ma_timer_init(ma_timer* pTimer) +static void ma_timer_init(ma_timer* pTimer) { struct timeval newTime; gettimeofday(&newTime, NULL); @@ -4849,7 +5204,7 @@ void ma_timer_init(ma_timer* pTimer) pTimer->counter = (newTime.tv_sec * 1000000) + newTime.tv_usec; } -double ma_timer_get_time_in_seconds(ma_timer* pTimer) +static double ma_timer_get_time_in_seconds(ma_timer* pTimer) { ma_uint64 newTimeCounter; ma_uint64 oldTimeCounter; @@ -4970,7 +5325,7 @@ Threading *******************************************************************************/ #ifdef MA_WIN32 -int ma_thread_priority_to_win32(ma_thread_priority priority) +static int ma_thread_priority_to_win32(ma_thread_priority priority) { switch (priority) { case ma_thread_priority_idle: return THREAD_PRIORITY_IDLE; @@ -4984,7 +5339,7 @@ int ma_thread_priority_to_win32(ma_thread_priority priority) } } -ma_result ma_thread_create__win32(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) +static ma_result ma_thread_create__win32(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) { pThread->win32.hThread = CreateThread(NULL, 0, entryProc, pData, 0, NULL); if (pThread->win32.hThread == NULL) { @@ -4996,18 +5351,18 @@ ma_result ma_thread_create__win32(ma_context* pContext, ma_thread* pThread, ma_t return MA_SUCCESS; } -void ma_thread_wait__win32(ma_thread* pThread) +static void ma_thread_wait__win32(ma_thread* pThread) { WaitForSingleObject(pThread->win32.hThread, INFINITE); } -void ma_sleep__win32(ma_uint32 milliseconds) +static void ma_sleep__win32(ma_uint32 milliseconds) { Sleep((DWORD)milliseconds); } -ma_result ma_mutex_init__win32(ma_context* pContext, ma_mutex* pMutex) +static ma_result ma_mutex_init__win32(ma_context* pContext, ma_mutex* pMutex) { (void)pContext; @@ -5019,23 +5374,23 @@ ma_result ma_mutex_init__win32(ma_context* pContext, ma_mutex* pMutex) return MA_SUCCESS; } -void ma_mutex_uninit__win32(ma_mutex* pMutex) +static void ma_mutex_uninit__win32(ma_mutex* pMutex) { CloseHandle(pMutex->win32.hMutex); } -void ma_mutex_lock__win32(ma_mutex* pMutex) +static void ma_mutex_lock__win32(ma_mutex* pMutex) { WaitForSingleObject(pMutex->win32.hMutex, INFINITE); } -void ma_mutex_unlock__win32(ma_mutex* pMutex) +static void ma_mutex_unlock__win32(ma_mutex* pMutex) { SetEvent(pMutex->win32.hMutex); } -ma_result ma_event_init__win32(ma_context* pContext, ma_event* pEvent) +static ma_result ma_event_init__win32(ma_context* pContext, ma_event* pEvent) { (void)pContext; @@ -5047,20 +5402,48 @@ ma_result ma_event_init__win32(ma_context* pContext, ma_event* pEvent) return MA_SUCCESS; } -void ma_event_uninit__win32(ma_event* pEvent) +static void ma_event_uninit__win32(ma_event* pEvent) { CloseHandle(pEvent->win32.hEvent); } -ma_bool32 ma_event_wait__win32(ma_event* pEvent) 
+static ma_bool32 ma_event_wait__win32(ma_event* pEvent) { return WaitForSingleObject(pEvent->win32.hEvent, INFINITE) == WAIT_OBJECT_0; } -ma_bool32 ma_event_signal__win32(ma_event* pEvent) +static ma_bool32 ma_event_signal__win32(ma_event* pEvent) { return SetEvent(pEvent->win32.hEvent); } + + +static ma_result ma_semaphore_init__win32(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore) +{ + (void)pContext; + + pSemaphore->win32.hSemaphore = CreateSemaphoreA(NULL, (LONG)initialValue, LONG_MAX, NULL); + if (pSemaphore->win32.hSemaphore == NULL) { + return MA_FAILED_TO_CREATE_SEMAPHORE; + } + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__win32(ma_semaphore* pSemaphore) +{ + CloseHandle((HANDLE)pSemaphore->win32.hSemaphore); +} + +static ma_bool32 ma_semaphore_wait__win32(ma_semaphore* pSemaphore) +{ + return WaitForSingleObject((HANDLE)pSemaphore->win32.hSemaphore, INFINITE) == WAIT_OBJECT_0; +} + +static ma_bool32 ma_semaphore_release__win32(ma_semaphore* pSemaphore) +{ + return ReleaseSemaphore((HANDLE)pSemaphore->win32.hSemaphore, 1, NULL) != 0; +} #endif @@ -5083,7 +5466,7 @@ typedef int (* ma_pthread_attr_setschedpolicy_proc)(pthread_attr_t *attr, int po typedef int (* ma_pthread_attr_getschedparam_proc)(const pthread_attr_t *attr, struct sched_param *param); typedef int (* ma_pthread_attr_setschedparam_proc)(pthread_attr_t *attr, const struct sched_param *param); -ma_result ma_thread_create__posix(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) +static ma_result ma_thread_create__posix(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) { int result; pthread_attr_t* pAttr = NULL; @@ -5150,16 +5533,16 @@ ma_result ma_thread_create__posix(ma_context* pContext, ma_thread* pThread, ma_t return MA_SUCCESS; } -void ma_thread_wait__posix(ma_thread* pThread) +static void ma_thread_wait__posix(ma_thread* pThread) { ((ma_pthread_join_proc)pThread->pContext->posix.pthread_join)(pThread->posix.thread, NULL); } -void ma_sleep__posix(ma_uint32 milliseconds) +static void ma_sleep__posix(ma_uint32 milliseconds) { #ifdef MA_EMSCRIPTEN (void)milliseconds; - ma_assert(MA_FALSE); /* The Emscripten build should never sleep. */ + MA_ASSERT(MA_FALSE); /* The Emscripten build should never sleep. 
*/ #else #if _POSIX_C_SOURCE >= 199309L struct timespec ts; @@ -5176,7 +5559,7 @@ void ma_sleep__posix(ma_uint32 milliseconds) } -ma_result ma_mutex_init__posix(ma_context* pContext, ma_mutex* pMutex) +static ma_result ma_mutex_init__posix(ma_context* pContext, ma_mutex* pMutex) { int result = ((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pMutex->posix.mutex, NULL); if (result != 0) { @@ -5186,23 +5569,23 @@ ma_result ma_mutex_init__posix(ma_context* pContext, ma_mutex* pMutex) return MA_SUCCESS; } -void ma_mutex_uninit__posix(ma_mutex* pMutex) +static void ma_mutex_uninit__posix(ma_mutex* pMutex) { ((ma_pthread_mutex_destroy_proc)pMutex->pContext->posix.pthread_mutex_destroy)(&pMutex->posix.mutex); } -void ma_mutex_lock__posix(ma_mutex* pMutex) +static void ma_mutex_lock__posix(ma_mutex* pMutex) { ((ma_pthread_mutex_lock_proc)pMutex->pContext->posix.pthread_mutex_lock)(&pMutex->posix.mutex); } -void ma_mutex_unlock__posix(ma_mutex* pMutex) +static void ma_mutex_unlock__posix(ma_mutex* pMutex) { ((ma_pthread_mutex_unlock_proc)pMutex->pContext->posix.pthread_mutex_unlock)(&pMutex->posix.mutex); } -ma_result ma_event_init__posix(ma_context* pContext, ma_event* pEvent) +static ma_result ma_event_init__posix(ma_context* pContext, ma_event* pEvent) { if (((ma_pthread_mutex_init_proc)pContext->posix.pthread_mutex_init)(&pEvent->posix.mutex, NULL) != 0) { return MA_FAILED_TO_CREATE_MUTEX; @@ -5216,40 +5599,72 @@ ma_result ma_event_init__posix(ma_context* pContext, ma_event* pEvent) return MA_SUCCESS; } -void ma_event_uninit__posix(ma_event* pEvent) +static void ma_event_uninit__posix(ma_event* pEvent) { ((ma_pthread_cond_destroy_proc)pEvent->pContext->posix.pthread_cond_destroy)(&pEvent->posix.condition); ((ma_pthread_mutex_destroy_proc)pEvent->pContext->posix.pthread_mutex_destroy)(&pEvent->posix.mutex); } -ma_bool32 ma_event_wait__posix(ma_event* pEvent) +static ma_bool32 ma_event_wait__posix(ma_event* pEvent) +{ + ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex); + { + while (pEvent->posix.value == 0) { + ((ma_pthread_cond_wait_proc)pEvent->pContext->posix.pthread_cond_wait)(&pEvent->posix.condition, &pEvent->posix.mutex); + } + pEvent->posix.value = 0; /* Auto-reset. */ + } + ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex); + + return MA_TRUE; +} + +static ma_bool32 ma_event_signal__posix(ma_event* pEvent) +{ + ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex); + { + pEvent->posix.value = 1; + ((ma_pthread_cond_signal_proc)pEvent->pContext->posix.pthread_cond_signal)(&pEvent->posix.condition); + } + ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex); + + return MA_TRUE; +} + + +static ma_result ma_semaphore_init__posix(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore) +{ + (void)pContext; + +#if defined(MA_APPLE) + /* Not yet implemented for Apple platforms since sem_init() is deprecated. Need to use a named semaphore via sem_open() instead. 
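    Something along these lines could work when that gets filled in. This is only a sketch for illustration,
    not miniaudio code; the name format, the open flags and the immediate sem_unlink() are all assumptions,
    and the posix.semaphore member would need to become a sem_t pointer rather than a sem_t:

        char name[64];
        sem_t* pSem;
        snprintf(name, sizeof(name), "/ma_sem_%p", (void*)pSemaphore);
        pSem = sem_open(name, O_CREAT | O_EXCL, S_IRUSR | S_IWUSR, (unsigned int)initialValue);
        if (pSem == SEM_FAILED) {
            return MA_FAILED_TO_CREATE_SEMAPHORE;
        }
        sem_unlink(name);

    Unlinking straight away means the name disappears from the system as soon as every handle has been closed
    with sem_close(). It would also need fcntl.h and sys/stat.h for the flag and mode constants.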
*/ + return MA_INVALID_OPERATION; +#else + if (sem_init(&pSemaphore->posix.semaphore, 0, (unsigned int)initialValue) == 0) { + return MA_FAILED_TO_CREATE_SEMAPHORE; + } +#endif + + return MA_SUCCESS; +} + +static void ma_semaphore_uninit__posix(ma_semaphore* pSemaphore) { - ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex); - { - while (pEvent->posix.value == 0) { - ((ma_pthread_cond_wait_proc)pEvent->pContext->posix.pthread_cond_wait)(&pEvent->posix.condition, &pEvent->posix.mutex); - } - pEvent->posix.value = 0; /* Auto-reset. */ - } - ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex); - - return MA_TRUE; + sem_close(&pSemaphore->posix.semaphore); } -ma_bool32 ma_event_signal__posix(ma_event* pEvent) +static ma_bool32 ma_semaphore_wait__posix(ma_semaphore* pSemaphore) { - ((ma_pthread_mutex_lock_proc)pEvent->pContext->posix.pthread_mutex_lock)(&pEvent->posix.mutex); - { - pEvent->posix.value = 1; - ((ma_pthread_cond_signal_proc)pEvent->pContext->posix.pthread_cond_signal)(&pEvent->posix.condition); - } - ((ma_pthread_mutex_unlock_proc)pEvent->pContext->posix.pthread_mutex_unlock)(&pEvent->posix.mutex); + return sem_wait(&pSemaphore->posix.semaphore) != -1; +} - return MA_TRUE; +static ma_bool32 ma_semaphore_release__posix(ma_semaphore* pSemaphore) +{ + return sem_post(&pSemaphore->posix.semaphore) != -1; } #endif -ma_result ma_thread_create(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) +static ma_result ma_thread_create(ma_context* pContext, ma_thread* pThread, ma_thread_entry_proc entryProc, void* pData) { if (pContext == NULL || pThread == NULL || entryProc == NULL) { return MA_FALSE; @@ -5265,7 +5680,7 @@ ma_result ma_thread_create(ma_context* pContext, ma_thread* pThread, ma_thread_e #endif } -void ma_thread_wait(ma_thread* pThread) +static void ma_thread_wait(ma_thread* pThread) { if (pThread == NULL) { return; @@ -5279,7 +5694,7 @@ void ma_thread_wait(ma_thread* pThread) #endif } -void ma_sleep(ma_uint32 milliseconds) +static void ma_sleep(ma_uint32 milliseconds) { #ifdef MA_WIN32 ma_sleep__win32(milliseconds); @@ -5408,36 +5823,64 @@ ma_bool32 ma_event_signal(ma_event* pEvent) } -ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax) +ma_result ma_semaphore_init(ma_context* pContext, int initialValue, ma_semaphore* pSemaphore) { - /* Normalize the range in case we were given something stupid. 
*/ - if (sampleRateMin < MA_MIN_SAMPLE_RATE) { - sampleRateMin = MA_MIN_SAMPLE_RATE; + if (pContext == NULL || pSemaphore == NULL) { + return MA_INVALID_ARGS; } - if (sampleRateMax > MA_MAX_SAMPLE_RATE) { - sampleRateMax = MA_MAX_SAMPLE_RATE; + +#ifdef MA_WIN32 + return ma_semaphore_init__win32(pContext, initialValue, pSemaphore); +#endif +#ifdef MA_POSIX + return ma_semaphore_init__posix(pContext, initialValue, pSemaphore); +#endif +} + +void ma_semaphore_uninit(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return; } - if (sampleRateMin > sampleRateMax) { - sampleRateMin = sampleRateMax; + +#ifdef MA_WIN32 + ma_semaphore_uninit__win32(pSemaphore); +#endif +#ifdef MA_POSIX + ma_semaphore_uninit__posix(pSemaphore); +#endif +} + +ma_bool32 ma_semaphore_wait(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_FALSE; } - if (sampleRateMin == sampleRateMax) { - return sampleRateMax; - } else { - size_t iStandardRate; - for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { - ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; - if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) { - return standardRate; - } - } +#ifdef MA_WIN32 + return ma_semaphore_wait__win32(pSemaphore); +#endif +#ifdef MA_POSIX + return ma_semaphore_wait__posix(pSemaphore); +#endif +} + +ma_bool32 ma_semaphore_release(ma_semaphore* pSemaphore) +{ + if (pSemaphore == NULL) { + return MA_FALSE; } - /* Should never get here. */ - ma_assert(MA_FALSE); - return 0; +#ifdef MA_WIN32 + return ma_semaphore_release__win32(pSemaphore); +#endif +#ifdef MA_POSIX + return ma_semaphore_release__posix(pSemaphore); +#endif } + +#if 0 ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn) { ma_uint32 closestRate = 0; @@ -5466,7 +5909,7 @@ ma_uint32 ma_get_closest_standard_sample_rate(ma_uint32 sampleRateIn) return closestRate; } - +#endif ma_uint32 ma_scale_buffer_size(ma_uint32 baseBufferSize, float scale) { @@ -5510,15 +5953,9 @@ ma_uint32 ma_get_default_buffer_size_in_frames(ma_performance_profile performanc return bufferSizeInMilliseconds * sampleRateMS; } -ma_uint32 ma_get_fragment_size_in_bytes(ma_uint32 bufferSizeInFrames, ma_uint32 periods, ma_format format, ma_uint32 channels) -{ - ma_uint32 fragmentSizeInFrames = bufferSizeInFrames / periods; - return fragmentSizeInFrames * ma_get_bytes_per_frame(format, channels); -} - void ma_zero_pcm_frames(void* p, ma_uint32 frameCount, ma_format format, ma_uint32 channels) { - ma_zero_memory(p, frameCount * ma_get_bytes_per_frame(format, channels)); + MA_ZERO_MEMORY(p, frameCount * ma_get_bytes_per_frame(format, channels)); } void ma_clip_samples_f32(float* p, ma_uint32 sampleCount) @@ -5714,7 +6151,7 @@ float ma_gain_db_to_factor(float gain) } -static MA_INLINE void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) +static void ma_device__on_data(ma_device* pDevice, void* pFramesOut, const void* pFramesIn, ma_uint32 frameCount) { ma_device_callback_proc onData; @@ -5726,7 +6163,7 @@ static MA_INLINE void ma_device__on_data(ma_device* pDevice, void* pFramesOut, c /* Volume control of input makes things a bit awkward because the input buffer is read-only. We'll need to use a temp buffer and loop in this case. 
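    The idea is to copy a chunk of the read-only input into a small stack buffer, scale the copy, and hand the
    scaled chunk to the callback, repeating until the whole input has been covered. For f32 samples the scaling
    step is nothing more than the following (apply_volume_f32 is a made-up name, shown purely to illustrate the
    per-sample multiply):

        static void apply_volume_f32(float* pDst, const float* pSrc, ma_uint64 sampleCount, float volume)
        {
            ma_uint64 iSample;
            for (iSample = 0; iSample < sampleCount; iSample += 1) {
                pDst[iSample] = pSrc[iSample] * volume;
            }
        }

    The loop below does this in chunks of at most MA_DATA_CONVERTER_STACK_BUFFER_SIZE bytes so the temporary
    buffer can live on the stack regardless of how many frames the backend delivers.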
*/ if (pFramesIn != NULL && pDevice->masterVolumeFactor < 1) { - ma_uint8 tempFramesIn[8192]; + ma_uint8 tempFramesIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); ma_uint32 totalFramesProcessed = 0; @@ -5762,173 +6199,174 @@ static MA_INLINE void ma_device__on_data(ma_device* pDevice, void* pFramesOut, c } -/* The callback for reading from the client -> DSP -> device. */ -ma_uint32 ma_device__on_read_from_client(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) -{ - ma_device* pDevice = (ma_device*)pUserData; - - ma_assert(pDevice != NULL); - - ma_device__on_data(pDevice, pFramesOut, NULL, frameCount); - - (void)pDSP; - return frameCount; -} -/* The PCM converter callback for reading from a buffer. */ -ma_uint32 ma_device__pcm_converter__on_read_from_buffer_capture(ma_pcm_converter* pConverter, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +/* A helper function for reading sample data from the client. */ +static void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut) { - ma_device* pDevice = (ma_device*)pUserData; - ma_uint32 framesToRead; - ma_uint32 bytesToRead; - - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesOut != NULL); - if (pDevice->capture._dspFrameCount == 0) { - return 0; /* Nothing left. */ - } - - framesToRead = frameCount; - if (framesToRead > pDevice->capture._dspFrameCount) { - framesToRead = pDevice->capture._dspFrameCount; - } - - bytesToRead = framesToRead * ma_get_bytes_per_frame(pConverter->formatConverterIn.config.formatIn, pConverter->channelRouter.config.channelsIn); - - /* pDevice->capture._dspFrames can be null in which case it should be treated as silence. */ - if (pDevice->capture._dspFrames != NULL) { - ma_copy_memory(pFramesOut, pDevice->capture._dspFrames, bytesToRead); - pDevice->capture._dspFrames += bytesToRead; + if (pDevice->playback.converter.isPassthrough) { + ma_device__on_data(pDevice, pFramesOut, NULL, frameCount); } else { - ma_zero_memory(pFramesOut, bytesToRead); - } - - pDevice->capture._dspFrameCount -= framesToRead; - - return framesToRead; -} - -ma_uint32 ma_device__pcm_converter__on_read_from_buffer_playback(ma_pcm_converter* pConverter, void* pFramesOut, ma_uint32 frameCount, void* pUserData) -{ - ma_device* pDevice = (ma_device*)pUserData; - ma_uint32 framesToRead; - ma_uint32 bytesToRead; - - ma_assert(pDevice != NULL); - - if (pDevice->playback._dspFrameCount == 0) { - return 0; /* Nothing left. */ - } + ma_result result; + ma_uint64 totalFramesReadOut; + ma_uint64 totalFramesReadIn; + void* pRunningFramesOut; - framesToRead = frameCount; - if (framesToRead > pDevice->playback._dspFrameCount) { - framesToRead = pDevice->playback._dspFrameCount; - } + totalFramesReadOut = 0; + totalFramesReadIn = 0; + pRunningFramesOut = pFramesOut; - bytesToRead = framesToRead * ma_get_bytes_per_frame(pConverter->formatConverterIn.config.formatIn, pConverter->channelRouter.config.channelsIn); + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In client format. 
*/ + ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + ma_uint64 framesReadThisIterationOut; + ma_uint64 requiredInputFrameCount; - /* pDevice->playback._dspFrames can be null in which case it should be treated as silence. */ - if (pDevice->playback._dspFrames != NULL) { - ma_copy_memory(pFramesOut, pDevice->playback._dspFrames, bytesToRead); - pDevice->playback._dspFrames += bytesToRead; - } else { - ma_zero_memory(pFramesOut, bytesToRead); - } + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > intermediaryBufferCap) { + framesToReadThisIterationIn = intermediaryBufferCap; + } - pDevice->playback._dspFrameCount -= framesToRead; - - return framesToRead; -} + requiredInputFrameCount = ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, frameCount); + if (framesToReadThisIterationIn > requiredInputFrameCount) { + framesToReadThisIterationIn = requiredInputFrameCount; + } + if (framesToReadThisIterationIn > 0) { + ma_device__on_data(pDevice, pIntermediaryBuffer, NULL, (ma_uint32)framesToReadThisIterationIn); + totalFramesReadIn += framesToReadThisIterationIn; + } + /* + At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any + input frames, we still want to try processing frames because there may some output frames generated from cached input data. + */ + framesReadThisIterationIn = framesToReadThisIterationIn; + framesReadThisIterationOut = framesToReadThisIterationOut; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } -/* A helper function for reading sample data from the client. */ -static MA_INLINE void ma_device__read_frames_from_client(ma_device* pDevice, ma_uint32 frameCount, void* pFramesOut) -{ - ma_assert(pDevice != NULL); - ma_assert(frameCount > 0); - ma_assert(pFramesOut != NULL); + totalFramesReadOut += framesReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); - if (pDevice->playback.converter.isPassthrough) { - ma_device__on_data(pDevice, pFramesOut, NULL, frameCount); - } else { - ma_pcm_converter_read(&pDevice->playback.converter, pFramesOut, frameCount); + if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) { + break; /* We're done. */ + } + } } } /* A helper for sending sample data to the client. 
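    Like ma_device__read_frames_from_client() above, this leans on the ma_data_converter_process_pcm_frames()
    contract: both frame counts are in/out parameters, going in as the number of frames available on the input
    side and the amount of space on the output side, and coming back as the number of frames actually consumed
    and produced. A minimal call looks like this (the buffer pointers and the count of 512 are placeholders for
    illustration):

        ma_uint64 deviceFrameCount = 512;
        ma_uint64 clientFrameCount = 512;
        ma_result result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pDeviceFrames, &deviceFrameCount, pClientFrames, &clientFrameCount);

    A successful return with both counts left at zero means the converter can make no further progress, which is
    why the loops in this section treat that case as the termination condition.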
*/ -static MA_INLINE void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCount, const void* pFrames) +static void ma_device__send_frames_to_client(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat) { - ma_assert(pDevice != NULL); - ma_assert(frameCount > 0); - ma_assert(pFrames != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); if (pDevice->capture.converter.isPassthrough) { - ma_device__on_data(pDevice, NULL, pFrames, frameCount); + ma_device__on_data(pDevice, NULL, pFramesInDeviceFormat, frameCountInDeviceFormat); } else { - ma_uint8 chunkBuffer[4096]; - ma_uint32 chunkFrameCount; + ma_result result; + ma_uint8 pFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint64 framesInClientFormatCap = sizeof(pFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 totalDeviceFramesProcessed = 0; + ma_uint64 totalClientFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; - pDevice->capture._dspFrameCount = frameCount; - pDevice->capture._dspFrames = (const ma_uint8*)pFrames; + /* We just keep going until we've exhaused all of our input frames and cannot generate any more output frames. */ + for (;;) { + ma_uint64 deviceFramesProcessedThisIteration; + ma_uint64 clientFramesProcessedThisIteration; - chunkFrameCount = sizeof(chunkBuffer) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + deviceFramesProcessedThisIteration = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + clientFramesProcessedThisIteration = framesInClientFormatCap; - for (;;) { - ma_uint32 framesJustRead = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, chunkBuffer, chunkFrameCount); - if (framesJustRead == 0) { + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &deviceFramesProcessedThisIteration, pFramesInClientFormat, &clientFramesProcessedThisIteration); + if (result != MA_SUCCESS) { break; } - ma_device__on_data(pDevice, NULL, chunkBuffer, framesJustRead); + if (clientFramesProcessedThisIteration > 0) { + ma_device__on_data(pDevice, NULL, pFramesInClientFormat, (ma_uint32)clientFramesProcessedThisIteration); /* Safe cast. */ + } - if (framesJustRead < chunkFrameCount) { - break; + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, deviceFramesProcessedThisIteration * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += deviceFramesProcessedThisIteration; + totalClientFramesProcessed += clientFramesProcessedThisIteration; + + if (deviceFramesProcessedThisIteration == 0 && clientFramesProcessedThisIteration == 0) { + break; /* We're done. */ } } } } -static MA_INLINE ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCount, const void* pFramesInInternalFormat, ma_pcm_rb* pRB) + +/* We only want to expose ma_device__handle_duplex_callback_capture() and ma_device__handle_duplex_callback_playback() if we have an asynchronous backend enabled. 
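    The backends listed in the guard below are the callback driven ones, which are the only ones that route duplex
    data through the ring buffer; the blocking backends drive the duplex conversion directly from their main loops.
    Keeping these helpers out of blocking-only builds is presumably also what keeps compilers from warning about
    unused static functions.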
*/ +#if defined(MA_HAS_JACK) || \ + defined(MA_HAS_COREAUDIO) || \ + defined(MA_HAS_AAUDIO) || \ + defined(MA_HAS_OPENSL) || \ + defined(MA_HAS_WEBAUDIO) +static ma_result ma_device__handle_duplex_callback_capture(ma_device* pDevice, ma_uint32 frameCountInDeviceFormat, const void* pFramesInDeviceFormat, ma_pcm_rb* pRB) { ma_result result; + ma_uint32 totalDeviceFramesProcessed = 0; + const void* pRunningFramesInDeviceFormat = pFramesInDeviceFormat; - ma_assert(pDevice != NULL); - ma_assert(frameCount > 0); - ma_assert(pFramesInInternalFormat != NULL); - ma_assert(pRB != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCountInDeviceFormat > 0); + MA_ASSERT(pFramesInDeviceFormat != NULL); + MA_ASSERT(pRB != NULL); - pDevice->capture._dspFrameCount = (ma_uint32)frameCount; - pDevice->capture._dspFrames = (const ma_uint8*)pFramesInInternalFormat; - - /* Write to the ring buffer. The ring buffer is in the external format. */ + /* Write to the ring buffer. The ring buffer is in the client format which means we need to convert. */ for (;;) { - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = 256; - void* pFramesInExternalFormat; + ma_uint32 framesToProcessInDeviceFormat = (frameCountInDeviceFormat - totalDeviceFramesProcessed); + ma_uint32 framesToProcessInClientFormat = MA_DATA_CONVERTER_STACK_BUFFER_SIZE / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint64 framesProcessedInDeviceFormat; + ma_uint64 framesProcessedInClientFormat; + void* pFramesInClientFormat; - result = ma_pcm_rb_acquire_write(pRB, &framesToProcess, &pFramesInExternalFormat); + result = ma_pcm_rb_acquire_write(pRB, &framesToProcessInClientFormat, &pFramesInClientFormat); if (result != MA_SUCCESS) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to acquire capture PCM frames from ring buffer.", result); break; } - if (framesToProcess == 0) { + if (framesToProcessInClientFormat == 0) { if (ma_pcm_rb_pointer_disance(pRB) == (ma_int32)ma_pcm_rb_get_subbuffer_size(pRB)) { break; /* Overrun. Not enough room in the ring buffer for input frame. Excess frames are dropped. */ } } /* Convert. */ - framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, pFramesInExternalFormat, framesToProcess); + framesProcessedInDeviceFormat = framesToProcessInDeviceFormat; + framesProcessedInClientFormat = framesToProcessInClientFormat; + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningFramesInDeviceFormat, &framesProcessedInDeviceFormat, pFramesInClientFormat, &framesProcessedInClientFormat); + if (result != MA_SUCCESS) { + break; + } - result = ma_pcm_rb_commit_write(pRB, framesProcessed, pFramesInExternalFormat); + result = ma_pcm_rb_commit_write(pRB, (ma_uint32)framesProcessedInDeviceFormat, pFramesInClientFormat); /* Safe cast. */ if (result != MA_SUCCESS) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "Failed to commit capture PCM frames to ring buffer.", result); break; } - if (framesProcessed < framesToProcess) { + pRunningFramesInDeviceFormat = ma_offset_ptr(pRunningFramesInDeviceFormat, framesProcessedInDeviceFormat * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + totalDeviceFramesProcessed += (ma_uint32)framesProcessedInDeviceFormat; /* Safe cast. */ + + /* We're done when we're unable to process any client nor device frames. */ + if (framesProcessedInClientFormat == 0 && framesProcessedInDeviceFormat == 0) { break; /* Done. 
*/ } } @@ -5936,27 +6374,28 @@ static MA_INLINE ma_result ma_device__handle_duplex_callback_capture(ma_device* return MA_SUCCESS; } -static MA_INLINE ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB) +static ma_result ma_device__handle_duplex_callback_playback(ma_device* pDevice, ma_uint32 frameCount, void* pFramesInInternalFormat, ma_pcm_rb* pRB) { ma_result result; - ma_uint8 playbackFramesInExternalFormat[4096]; - ma_uint8 silentInputFrames[4096]; + ma_uint8 playbackFramesInExternalFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 silentInputFrames[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 totalFramesToReadFromClient; ma_uint32 totalFramesReadFromClient; + ma_uint32 totalFramesReadOut = 0; - ma_assert(pDevice != NULL); - ma_assert(frameCount > 0); - ma_assert(pFramesInInternalFormat != NULL); - ma_assert(pRB != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(frameCount > 0); + MA_ASSERT(pFramesInInternalFormat != NULL); + MA_ASSERT(pRB != NULL); /* Sitting in the ring buffer should be captured data from the capture callback in external format. If there's not enough data in there for the whole frameCount frames we just use silence instead for the input data. */ - ma_zero_memory(silentInputFrames, sizeof(silentInputFrames)); + MA_ZERO_MEMORY(silentInputFrames, sizeof(silentInputFrames)); /* We need to calculate how many output frames are required to be read from the client to completely fill frameCount internal frames. */ - totalFramesToReadFromClient = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->playback.internalSampleRate, frameCount); /* ma_pcm_converter_get_required_input_frame_count(&pDevice->playback.converter, (ma_uint32)frameCount); */ + totalFramesToReadFromClient = (ma_uint32)ma_data_converter_get_required_input_frame_count(&pDevice->playback.converter, frameCount); totalFramesReadFromClient = 0; while (totalFramesReadFromClient < totalFramesToReadFromClient && ma_device_is_started(pDevice)) { ma_uint32 framesRemainingFromClient; @@ -5999,16 +6438,20 @@ static MA_INLINE ma_result ma_device__handle_duplex_callback_playback(ma_device* } /* We have samples in external format so now we need to convert to internal format and output to the device. */ - pDevice->playback._dspFrameCount = inputFrameCount; - pDevice->playback._dspFrames = (const ma_uint8*)playbackFramesInExternalFormat; - ma_pcm_converter_read(&pDevice->playback.converter, pFramesInInternalFormat, inputFrameCount); + { + ma_uint64 framesConvertedIn = inputFrameCount; + ma_uint64 framesConvertedOut = (frameCount - totalFramesReadOut); + ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackFramesInExternalFormat, &framesConvertedIn, pFramesInInternalFormat, &framesConvertedOut); - totalFramesReadFromClient += inputFrameCount; - pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, inputFrameCount * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + totalFramesReadFromClient += (ma_uint32)framesConvertedIn; /* Safe cast. */ + totalFramesReadOut += (ma_uint32)framesConvertedOut; /* Safe cast. */ + pFramesInInternalFormat = ma_offset_ptr(pFramesInInternalFormat, framesConvertedOut * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } } return MA_SUCCESS; } +#endif /* Asynchronous backends. */ /* A helper for changing the state of the device. 
*/ static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_uint32 newState) @@ -6019,10 +6462,7 @@ static MA_INLINE void ma_device__set_state(ma_device* pDevice, ma_uint32 newStat /* A helper for getting the state of the device. */ static MA_INLINE ma_uint32 ma_device__get_state(ma_device* pDevice) { - ma_uint32 state; - ma_atomic_exchange_32(&state, pDevice->state); - - return state; + return pDevice->state; } @@ -6034,25 +6474,6 @@ static MA_INLINE ma_uint32 ma_device__get_state(ma_device* pDevice) #endif -ma_bool32 ma_context__device_id_equal(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) -{ - ma_assert(pContext != NULL); - - if (pID0 == pID1) return MA_TRUE; - - if ((pID0 == NULL && pID1 != NULL) || - (pID0 != NULL && pID1 == NULL)) { - return MA_FALSE; - } - - if (pContext->onDeviceIDEqual) { - return pContext->onDeviceIDEqual(pContext, pID0, pID1); - } - - return MA_FALSE; -} - - typedef struct { ma_device_type deviceType; @@ -6062,10 +6483,10 @@ typedef struct ma_bool32 foundDevice; } ma_context__try_get_device_name_by_id__enum_callback_data; -ma_bool32 ma_context__try_get_device_name_by_id__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) +static ma_bool32 ma_context__try_get_device_name_by_id__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) { ma_context__try_get_device_name_by_id__enum_callback_data* pData = (ma_context__try_get_device_name_by_id__enum_callback_data*)pUserData; - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); if (pData->deviceType == deviceType) { if (pContext->onDeviceIDEqual(pContext, pData->pDeviceID, &pDeviceInfo->id)) { @@ -6082,13 +6503,13 @@ Generic function for retrieving the name of a device by it's ID. This function simply enumerates every device and then retrieves the name of the first device that has the same ID. */ -ma_result ma_context__try_get_device_name_by_id(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, char* pName, size_t nameBufferSize) +static ma_result ma_context__try_get_device_name_by_id(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, char* pName, size_t nameBufferSize) { ma_result result; ma_context__try_get_device_name_by_id__enum_callback_data data; - ma_assert(pContext != NULL); - ma_assert(pName != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pName != NULL); if (pDeviceID == NULL) { return MA_NO_DEVICE; @@ -6125,7 +6546,7 @@ ma_uint32 ma_get_format_priority_index(ma_format format) /* Lower = better. */ return (ma_uint32)-1; } -void ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType); +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType); /******************************************************************************* @@ -6140,10 +6561,10 @@ Null Backend #define MA_DEVICE_OP_SUSPEND__NULL 2 #define MA_DEVICE_OP_KILL__NULL 3 -ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) +static ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) { ma_device* pDevice = (ma_device*)pData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); for (;;) { /* Keep the thread alive until the device is uninitialized. */ /* Wait for an operation to be requested. */ @@ -6193,7 +6614,7 @@ ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) /* Getting a signal on a "none" operation probably means an error. 
Return invalid operation. */ if (pDevice->null_device.operation == MA_DEVICE_OP_NONE__NULL) { - ma_assert(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */ + MA_ASSERT(MA_FALSE); /* <-- Trigger this in debug mode to ensure developers are aware they're doing something wrong (or there's a bug in a miniaudio). */ ma_atomic_exchange_32(&pDevice->null_device.operationResult, MA_INVALID_OPERATION); ma_event_signal(&pDevice->null_device.operationCompletionEvent); continue; /* Continue the loop. Don't terminate. */ @@ -6203,7 +6624,7 @@ ma_thread_result MA_THREADCALL ma_device_thread__null(void* pData) return (ma_thread_result)0; } -ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation) +static ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation) { ma_atomic_exchange_32(&pDevice->null_device.operation, operation); if (!ma_event_signal(&pDevice->null_device.operationEvent)) { @@ -6217,7 +6638,7 @@ ma_result ma_device_do_operation__null(ma_device* pDevice, ma_uint32 operation) return pDevice->null_device.operationResult; } -ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice) +static ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice) { ma_uint32 internalSampleRate; if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { @@ -6230,27 +6651,27 @@ ma_uint64 ma_device_get_total_run_time_in_frames__null(ma_device* pDevice) return (ma_uint64)((pDevice->null_device.priorRunTime + ma_timer_get_time_in_seconds(&pDevice->null_device.timer)) * internalSampleRate); } -ma_bool32 ma_context_is_device_id_equal__null(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__null(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return pID0->nullbackend == pID1->nullbackend; } -ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_bool32 cbResult = MA_TRUE; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); /* Playback. */ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Playback Device", (size_t)-1); cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); } @@ -6258,7 +6679,7 @@ ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devic /* Capture. 
*/ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), "NULL Capture Device", (size_t)-1); cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); } @@ -6266,11 +6687,11 @@ ma_result ma_context_enumerate_devices__null(ma_context* pContext, ma_enum_devic return MA_SUCCESS; } -ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_uint32 iFormat; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (pDeviceID != NULL && pDeviceID->nullbackend != 0) { return MA_NO_DEVICE; /* Don't know the device. */ @@ -6300,9 +6721,9 @@ ma_result ma_context_get_device_info__null(ma_context* pContext, ma_device_type } -void ma_device_uninit__null(ma_device* pDevice) +static void ma_device_uninit__null(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* Keep it clean and wait for the device thread to finish before returning. */ ma_device_do_operation__null(pDevice, MA_DEVICE_OP_KILL__NULL); @@ -6312,14 +6733,14 @@ void ma_device_uninit__null(ma_device* pDevice) ma_event_uninit(&pDevice->null_device.operationEvent); } -ma_result ma_device_init__null(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__null(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result; ma_uint32 bufferSizeInFrames; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); - ma_zero_object(&pDevice->null_device); + MA_ZERO_OBJECT(&pDevice->null_device); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -6369,9 +6790,9 @@ ma_result ma_device_init__null(ma_context* pContext, const ma_device_config* pCo return MA_SUCCESS; } -ma_result ma_device_start__null(ma_device* pDevice) +static ma_result ma_device_start__null(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); ma_device_do_operation__null(pDevice, MA_DEVICE_OP_START__NULL); @@ -6379,9 +6800,9 @@ ma_result ma_device_start__null(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_stop__null(ma_device* pDevice) +static ma_result ma_device_stop__null(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); ma_device_do_operation__null(pDevice, MA_DEVICE_OP_SUSPEND__NULL); @@ -6389,7 +6810,7 @@ ma_result ma_device_stop__null(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { ma_result result = MA_SUCCESS; ma_uint32 totalPCMFramesProcessed; @@ -6434,7 +6855,7 @@ ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_u } /* If we've consumed the whole buffer we can return now. 
*/ - ma_assert(totalPCMFramesProcessed <= frameCount); + MA_ASSERT(totalPCMFramesProcessed <= frameCount); if (totalPCMFramesProcessed == frameCount) { break; } @@ -6469,7 +6890,7 @@ ma_result ma_device_write__null(ma_device* pDevice, const void* pPCMFrames, ma_u return result; } -ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) { ma_result result = MA_SUCCESS; ma_uint32 totalPCMFramesProcessed; @@ -6493,7 +6914,7 @@ ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 f } /* We need to ensured the output buffer is zeroed. */ - ma_zero_memory(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf); + MA_ZERO_MEMORY(ma_offset_ptr(pPCMFrames, totalPCMFramesProcessed*bpf), framesToProcess*bpf); pDevice->null_device.currentPeriodFramesRemainingCapture -= framesToProcess; totalPCMFramesProcessed += framesToProcess; @@ -6505,7 +6926,7 @@ ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 f } /* If we've consumed the whole buffer we can return now. */ - ma_assert(totalPCMFramesProcessed <= frameCount); + MA_ASSERT(totalPCMFramesProcessed <= frameCount); if (totalPCMFramesProcessed == frameCount) { break; } @@ -6540,12 +6961,12 @@ ma_result ma_device_read__null(ma_device* pDevice, void* pPCMFrames, ma_uint32 f return result; } -ma_result ma_device_main_loop__null(ma_device* pDevice) +static ma_result ma_device_main_loop__null(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = MA_FALSE; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* The capture device needs to be started immediately. 
*/ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { @@ -6561,77 +6982,89 @@ ma_result ma_device_main_loop__null(ma_device* pDevice) case ma_device_type_duplex: { /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__null(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = ma_device_read__null(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - pDevice->capture._dspFrameCount = framesToProcess; - pDevice->capture._dspFrames = capturedDeviceData; + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; + /* At this point we have our captured data in device format and we now need to convert it to client format. 
*/ for (;;) { - ma_uint8 capturedData[8192]; - ma_uint8 playbackData[8192]; - ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames); - ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing); - if (capturedFramesToProcess == 0) { - break; /* Don't fire the data callback with zero frames. */ + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; } - ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess); + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ - /* At this point the playbackData buffer should be holding data that needs to be written to the device. */ - pDevice->playback._dspFrameCount = capturedFramesToProcess; - pDevice->playback._dspFrames = playbackData; + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. 
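    The inner loop keeps feeding those client frames through the playback converter and writing each converted block
    to the device until capturedClientFramesToProcessThisIteration reaches zero, so a single callback invocation may
    result in several device writes when sample rate conversion expands the data.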
*/ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__null(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__null(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__null()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } break; @@ -6702,18 +7135,18 @@ ma_result ma_device_main_loop__null(ma_device* pDevice) return result; } -ma_result ma_context_uninit__null(ma_context* pContext) +static ma_result ma_context_uninit__null(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_null); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_null); (void)pContext; return MA_SUCCESS; } -ma_result ma_context_init__null(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__null(const ma_context_config* pConfig, ma_context* pContext) { - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)pConfig; @@ -6820,10 +7253,10 @@ typedef struct #define WAVE_FORMAT_IEEE_FLOAT 0x0003 #endif -GUID MA_GUID_NULL = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}; +static GUID MA_GUID_NULL = {0x00000000, 0x0000, 0x0000, {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}; /* Converts an individual Win32-style channel identifier (SPEAKER_FRONT_LEFT, etc.) to miniaudio. */ -ma_uint8 ma_channel_id_to_ma__win32(DWORD id) +static ma_uint8 ma_channel_id_to_ma__win32(DWORD id) { switch (id) { @@ -6850,7 +7283,7 @@ ma_uint8 ma_channel_id_to_ma__win32(DWORD id) } /* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to Win32-style. */ -DWORD ma_channel_id_to_win32(DWORD id) +static DWORD ma_channel_id_to_win32(DWORD id) { switch (id) { @@ -6878,7 +7311,7 @@ DWORD ma_channel_id_to_win32(DWORD id) } /* Converts a channel mapping to a Win32-style channel mask. */ -DWORD ma_channel_map_to_channel_mask__win32(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels) +static DWORD ma_channel_map_to_channel_mask__win32(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels) { DWORD dwChannelMask = 0; ma_uint32 iChannel; @@ -6891,7 +7324,7 @@ DWORD ma_channel_map_to_channel_mask__win32(const ma_channel channelMap[MA_MAX_C } /* Converts a Win32-style channel mask to a miniaudio channel map. 
*/ -void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +static void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { if (channels == 1 && dwChannelMask == 0) { channelMap[0] = MA_CHANNEL_MONO; @@ -6919,7 +7352,7 @@ void ma_channel_mask_to_channel_map__win32(DWORD dwChannelMask, ma_uint32 channe } #ifdef __cplusplus -ma_bool32 ma_is_guid_equal(const void* a, const void* b) +static ma_bool32 ma_is_guid_equal(const void* a, const void* b) { return IsEqualGUID(*(const GUID*)a, *(const GUID*)b); } @@ -6927,9 +7360,9 @@ ma_bool32 ma_is_guid_equal(const void* a, const void* b) #define ma_is_guid_equal(a, b) IsEqualGUID((const GUID*)a, (const GUID*)b) #endif -ma_format ma_format_from_WAVEFORMATEX(const WAVEFORMATEX* pWF) +static ma_format ma_format_from_WAVEFORMATEX(const WAVEFORMATEX* pWF) { - ma_assert(pWF != NULL); + MA_ASSERT(pWF != NULL); if (pWF->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { const WAVEFORMATEXTENSIBLE* pWFEX = (const WAVEFORMATEXTENSIBLE*)pWF; @@ -7010,9 +7443,6 @@ WASAPI Backend #endif #endif /* 0 */ - - - /* Some compilers don't define VerifyVersionInfoW. Need to write this ourselves. */ #define MA_WIN32_WINNT_VISTA 0x0600 #define MA_VER_MINORVERSION 0x01 @@ -7050,30 +7480,30 @@ typedef struct /* Some compilers don't define PropVariantInit(). We just do this ourselves since it's just a memset(). */ static MA_INLINE void ma_PropVariantInit(PROPVARIANT* pProp) { - ma_zero_object(pProp); + MA_ZERO_OBJECT(pProp); } -const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14}; -const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0}; +static const PROPERTYKEY MA_PKEY_Device_FriendlyName = {{0xA45C254E, 0xDF1C, 0x4EFD, {0x80, 0x20, 0x67, 0xD1, 0x46, 0xA8, 0x50, 0xE0}}, 14}; +static const PROPERTYKEY MA_PKEY_AudioEngine_DeviceFormat = {{0xF19F064D, 0x82C, 0x4E27, {0xBC, 0x73, 0x68, 0x82, 0xA1, 0xBB, 0x8E, 0x4C}}, 0}; -const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */ -const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */ +static const IID MA_IID_IUnknown = {0x00000000, 0x0000, 0x0000, {0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46}}; /* 00000000-0000-0000-C000-000000000046 */ +static const IID MA_IID_IAgileObject = {0x94EA2B94, 0xE9CC, 0x49E0, {0xC0, 0xFF, 0xEE, 0x64, 0xCA, 0x8F, 0x5B, 0x90}}; /* 94EA2B94-E9CC-49E0-C0FF-EE64CA8F5B90 */ -const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */ -const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */ -const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */ -const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */ -const IID 
MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */ -const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */ +static const IID MA_IID_IAudioClient = {0x1CB9AD4C, 0xDBFA, 0x4C32, {0xB1, 0x78, 0xC2, 0xF5, 0x68, 0xA7, 0x03, 0xB2}}; /* 1CB9AD4C-DBFA-4C32-B178-C2F568A703B2 = __uuidof(IAudioClient) */ +static const IID MA_IID_IAudioClient2 = {0x726778CD, 0xF60A, 0x4EDA, {0x82, 0xDE, 0xE4, 0x76, 0x10, 0xCD, 0x78, 0xAA}}; /* 726778CD-F60A-4EDA-82DE-E47610CD78AA = __uuidof(IAudioClient2) */ +static const IID MA_IID_IAudioClient3 = {0x7ED4EE07, 0x8E67, 0x4CD4, {0x8C, 0x1A, 0x2B, 0x7A, 0x59, 0x87, 0xAD, 0x42}}; /* 7ED4EE07-8E67-4CD4-8C1A-2B7A5987AD42 = __uuidof(IAudioClient3) */ +static const IID MA_IID_IAudioRenderClient = {0xF294ACFC, 0x3146, 0x4483, {0xA7, 0xBF, 0xAD, 0xDC, 0xA7, 0xC2, 0x60, 0xE2}}; /* F294ACFC-3146-4483-A7BF-ADDCA7C260E2 = __uuidof(IAudioRenderClient) */ +static const IID MA_IID_IAudioCaptureClient = {0xC8ADBD64, 0xE71E, 0x48A0, {0xA4, 0xDE, 0x18, 0x5C, 0x39, 0x5C, 0xD3, 0x17}}; /* C8ADBD64-E71E-48A0-A4DE-185C395CD317 = __uuidof(IAudioCaptureClient) */ +static const IID MA_IID_IMMNotificationClient = {0x7991EEC9, 0x7E89, 0x4D85, {0x83, 0x90, 0x6C, 0x70, 0x3C, 0xEC, 0x60, 0xC0}}; /* 7991EEC9-7E89-4D85-8390-6C703CEC60C0 = __uuidof(IMMNotificationClient) */ #ifndef MA_WIN32_DESKTOP -const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */ -const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */ -const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */ +static const IID MA_IID_DEVINTERFACE_AUDIO_RENDER = {0xE6327CAD, 0xDCEC, 0x4949, {0xAE, 0x8A, 0x99, 0x1E, 0x97, 0x6A, 0x79, 0xD2}}; /* E6327CAD-DCEC-4949-AE8A-991E976A79D2 */ +static const IID MA_IID_DEVINTERFACE_AUDIO_CAPTURE = {0x2EEF81BE, 0x33FA, 0x4800, {0x96, 0x70, 0x1C, 0xD4, 0x74, 0x97, 0x2C, 0x3F}}; /* 2EEF81BE-33FA-4800-9670-1CD474972C3F */ +static const IID MA_IID_IActivateAudioInterfaceCompletionHandler = {0x41D949AB, 0x9862, 0x444A, {0x80, 0xF6, 0xC2, 0x61, 0x33, 0x4D, 0xA5, 0xEB}}; /* 41D949AB-9862-444A-80F6-C261334DA5EB */ #endif -const IID MA_CLSID_MMDeviceEnumerator_Instance = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */ -const IID MA_IID_IMMDeviceEnumerator_Instance = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */ +static const IID MA_CLSID_MMDeviceEnumerator_Instance = {0xBCDE0395, 0xE52F, 0x467C, {0x8E, 0x3D, 0xC4, 0x57, 0x92, 0x91, 0x69, 0x2E}}; /* BCDE0395-E52F-467C-8E3D-C4579291692E = __uuidof(MMDeviceEnumerator) */ +static const IID MA_IID_IMMDeviceEnumerator_Instance = {0xA95664D2, 0x9614, 0x4F35, {0xA7, 0x46, 0xDE, 0x8D, 0xB6, 0x36, 0x17, 0xE6}}; /* A95664D2-9614-4F35-A746-DE8DB63617E6 = __uuidof(IMMDeviceEnumerator) */ #ifdef __cplusplus #define MA_CLSID_MMDeviceEnumerator 
MA_CLSID_MMDeviceEnumerator_Instance #define MA_IID_IMMDeviceEnumerator MA_IID_IMMDeviceEnumerator_Instance @@ -7171,9 +7601,9 @@ struct ma_IUnknown { ma_IUnknownVtbl* lpVtbl; }; -HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IUnknown_QueryInterface(ma_IUnknown* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IUnknown_AddRef(ma_IUnknown* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { return pThis->lpVtbl->Release(pThis); } #ifdef MA_WIN32_DESKTOP /* IMMNotificationClient */ @@ -7211,14 +7641,14 @@ ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { ma_IMMDeviceEnumeratorVtbl* lpVtbl; }; - HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } - ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); } - ULONG ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); } - HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); } - HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); } - HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, LPCWSTR pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); } - HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); } - HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); } + static HRESULT ma_IMMDeviceEnumerator_QueryInterface(ma_IMMDeviceEnumerator* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static ULONG ma_IMMDeviceEnumerator_AddRef(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static ULONG ma_IMMDeviceEnumerator_Release(ma_IMMDeviceEnumerator* pThis) { return pThis->lpVtbl->Release(pThis); } + static HRESULT ma_IMMDeviceEnumerator_EnumAudioEndpoints(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, DWORD dwStateMask, ma_IMMDeviceCollection** ppDevices) { return pThis->lpVtbl->EnumAudioEndpoints(pThis, dataFlow, dwStateMask, ppDevices); } + static HRESULT ma_IMMDeviceEnumerator_GetDefaultAudioEndpoint(ma_IMMDeviceEnumerator* pThis, ma_EDataFlow dataFlow, ma_ERole role, ma_IMMDevice** ppEndpoint) { return pThis->lpVtbl->GetDefaultAudioEndpoint(pThis, dataFlow, role, ppEndpoint); } + static HRESULT ma_IMMDeviceEnumerator_GetDevice(ma_IMMDeviceEnumerator* pThis, LPCWSTR 
pID, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->GetDevice(pThis, pID, ppDevice); } + static HRESULT ma_IMMDeviceEnumerator_RegisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->RegisterEndpointNotificationCallback(pThis, pClient); } + static HRESULT ma_IMMDeviceEnumerator_UnregisterEndpointNotificationCallback(ma_IMMDeviceEnumerator* pThis, ma_IMMNotificationClient* pClient) { return pThis->lpVtbl->UnregisterEndpointNotificationCallback(pThis, pClient); } /* IMMDeviceCollection */ @@ -7237,11 +7667,11 @@ ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { ma_IMMDeviceCollectionVtbl* lpVtbl; }; - HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } - ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); } - ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); } - HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); } - HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); } + static HRESULT ma_IMMDeviceCollection_QueryInterface(ma_IMMDeviceCollection* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static ULONG ma_IMMDeviceCollection_AddRef(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static ULONG ma_IMMDeviceCollection_Release(ma_IMMDeviceCollection* pThis) { return pThis->lpVtbl->Release(pThis); } + static HRESULT ma_IMMDeviceCollection_GetCount(ma_IMMDeviceCollection* pThis, UINT* pDevices) { return pThis->lpVtbl->GetCount(pThis, pDevices); } + static HRESULT ma_IMMDeviceCollection_Item(ma_IMMDeviceCollection* pThis, UINT nDevice, ma_IMMDevice** ppDevice) { return pThis->lpVtbl->Item(pThis, nDevice, ppDevice); } /* IMMDevice */ @@ -7262,13 +7692,13 @@ ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { ma_IMMDeviceVtbl* lpVtbl; }; - HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } - ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); } - ULONG ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); } - HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); } - HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); } - HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, LPWSTR *pID) { return pThis->lpVtbl->GetId(pThis, pID); } - HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); } + static HRESULT ma_IMMDevice_QueryInterface(ma_IMMDevice* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static ULONG ma_IMMDevice_AddRef(ma_IMMDevice* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static ULONG 
ma_IMMDevice_Release(ma_IMMDevice* pThis) { return pThis->lpVtbl->Release(pThis); } + static HRESULT ma_IMMDevice_Activate(ma_IMMDevice* pThis, const IID* const iid, DWORD dwClsCtx, PROPVARIANT* pActivationParams, void** ppInterface) { return pThis->lpVtbl->Activate(pThis, iid, dwClsCtx, pActivationParams, ppInterface); } + static HRESULT ma_IMMDevice_OpenPropertyStore(ma_IMMDevice* pThis, DWORD stgmAccess, ma_IPropertyStore** ppProperties) { return pThis->lpVtbl->OpenPropertyStore(pThis, stgmAccess, ppProperties); } + static HRESULT ma_IMMDevice_GetId(ma_IMMDevice* pThis, LPWSTR *pID) { return pThis->lpVtbl->GetId(pThis, pID); } + static HRESULT ma_IMMDevice_GetState(ma_IMMDevice* pThis, DWORD *pState) { return pThis->lpVtbl->GetState(pThis, pState); } #else /* IActivateAudioInterfaceAsyncOperation */ typedef struct @@ -7285,10 +7715,10 @@ ULONG ma_IUnknown_Release(ma_IUnknown* pThis) { ma_IActivateAudioInterfaceAsyncOperationVtbl* lpVtbl; }; - HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } - ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); } - ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); } - HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); } + static HRESULT ma_IActivateAudioInterfaceAsyncOperation_QueryInterface(ma_IActivateAudioInterfaceAsyncOperation* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } + static ULONG ma_IActivateAudioInterfaceAsyncOperation_AddRef(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->AddRef(pThis); } + static ULONG ma_IActivateAudioInterfaceAsyncOperation_Release(ma_IActivateAudioInterfaceAsyncOperation* pThis) { return pThis->lpVtbl->Release(pThis); } + static HRESULT ma_IActivateAudioInterfaceAsyncOperation_GetActivateResult(ma_IActivateAudioInterfaceAsyncOperation* pThis, HRESULT *pActivateResult, ma_IUnknown** ppActivatedInterface) { return pThis->lpVtbl->GetActivateResult(pThis, pActivateResult, ppActivatedInterface); } #endif /* IPropertyStore */ @@ -7310,14 +7740,14 @@ struct ma_IPropertyStore { ma_IPropertyStoreVtbl* lpVtbl; }; -HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); } -HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); } -HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); } -HRESULT 
ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); } -HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); } +static HRESULT ma_IPropertyStore_QueryInterface(ma_IPropertyStore* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IPropertyStore_AddRef(ma_IPropertyStore* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IPropertyStore_Release(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IPropertyStore_GetCount(ma_IPropertyStore* pThis, DWORD* pPropCount) { return pThis->lpVtbl->GetCount(pThis, pPropCount); } +static HRESULT ma_IPropertyStore_GetAt(ma_IPropertyStore* pThis, DWORD propIndex, PROPERTYKEY* pPropKey) { return pThis->lpVtbl->GetAt(pThis, propIndex, pPropKey); } +static HRESULT ma_IPropertyStore_GetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, PROPVARIANT* pPropVar) { return pThis->lpVtbl->GetValue(pThis, pKey, pPropVar); } +static HRESULT ma_IPropertyStore_SetValue(ma_IPropertyStore* pThis, const PROPERTYKEY* const pKey, const PROPVARIANT* const pPropVar) { return pThis->lpVtbl->SetValue(pThis, pKey, pPropVar); } +static HRESULT ma_IPropertyStore_Commit(ma_IPropertyStore* pThis) { return pThis->lpVtbl->Commit(pThis); } /* IAudioClient */ @@ -7346,21 +7776,21 @@ struct ma_IAudioClient { ma_IAudioClientVtbl* lpVtbl; }; -HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } -HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } -HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } -HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } -HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } -HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } -HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } -HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); } -HRESULT 
ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); } -HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); } -HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } -HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static HRESULT ma_IAudioClient_QueryInterface(ma_IAudioClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IAudioClient_AddRef(ma_IAudioClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IAudioClient_Release(ma_IAudioClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IAudioClient_Initialize(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static HRESULT ma_IAudioClient_GetBufferSize(ma_IAudioClient* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static HRESULT ma_IAudioClient_GetStreamLatency(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static HRESULT ma_IAudioClient_GetCurrentPadding(ma_IAudioClient* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static HRESULT ma_IAudioClient_IsFormatSupported(ma_IAudioClient* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static HRESULT ma_IAudioClient_GetMixFormat(ma_IAudioClient* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static HRESULT ma_IAudioClient_GetDevicePeriod(ma_IAudioClient* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static HRESULT ma_IAudioClient_Start(ma_IAudioClient* pThis) { return pThis->lpVtbl->Start(pThis); } +static HRESULT ma_IAudioClient_Stop(ma_IAudioClient* pThis) { return pThis->lpVtbl->Stop(pThis); } +static HRESULT ma_IAudioClient_Reset(ma_IAudioClient* pThis) { return pThis->lpVtbl->Reset(pThis); } +static HRESULT ma_IAudioClient_SetEventHandle(ma_IAudioClient* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static HRESULT ma_IAudioClient_GetService(ma_IAudioClient* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } /* IAudioClient2 */ typedef struct @@ -7393,24 +7823,24 @@ struct ma_IAudioClient2 { ma_IAudioClient2Vtbl* lpVtbl; }; -HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, 
MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } -HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } -HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } -HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } -HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } -HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } -HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } -HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); } -HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); } -HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); } -HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } -HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } -HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } -HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } -HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } +static HRESULT ma_IAudioClient2_QueryInterface(ma_IAudioClient2* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IAudioClient2_AddRef(ma_IAudioClient2* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IAudioClient2_Release(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IAudioClient2_Initialize(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static HRESULT ma_IAudioClient2_GetBufferSize(ma_IAudioClient2* 
pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static HRESULT ma_IAudioClient2_GetStreamLatency(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static HRESULT ma_IAudioClient2_GetCurrentPadding(ma_IAudioClient2* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static HRESULT ma_IAudioClient2_IsFormatSupported(ma_IAudioClient2* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static HRESULT ma_IAudioClient2_GetMixFormat(ma_IAudioClient2* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static HRESULT ma_IAudioClient2_GetDevicePeriod(ma_IAudioClient2* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static HRESULT ma_IAudioClient2_Start(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Start(pThis); } +static HRESULT ma_IAudioClient2_Stop(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Stop(pThis); } +static HRESULT ma_IAudioClient2_Reset(ma_IAudioClient2* pThis) { return pThis->lpVtbl->Reset(pThis); } +static HRESULT ma_IAudioClient2_SetEventHandle(ma_IAudioClient2* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static HRESULT ma_IAudioClient2_GetService(ma_IAudioClient2* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static HRESULT ma_IAudioClient2_IsOffloadCapable(ma_IAudioClient2* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static HRESULT ma_IAudioClient2_SetClientProperties(ma_IAudioClient2* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static HRESULT ma_IAudioClient2_GetBufferSizeLimits(ma_IAudioClient2* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } /* IAudioClient3 */ @@ -7449,27 +7879,27 @@ struct ma_IAudioClient3 { ma_IAudioClient3Vtbl* lpVtbl; }; -HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } -HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } -HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { 
return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } -HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } -HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } -HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } -HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } -HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); } -HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); } -HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); } -HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } -HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } -HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } -HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } -HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } -HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); } -HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); } -HRESULT ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); } +static HRESULT ma_IAudioClient3_QueryInterface(ma_IAudioClient3* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IAudioClient3_AddRef(ma_IAudioClient3* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IAudioClient3_Release(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IAudioClient3_Initialize(ma_IAudioClient3* 
pThis, MA_AUDCLNT_SHAREMODE shareMode, DWORD streamFlags, MA_REFERENCE_TIME bufferDuration, MA_REFERENCE_TIME periodicity, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGuid) { return pThis->lpVtbl->Initialize(pThis, shareMode, streamFlags, bufferDuration, periodicity, pFormat, pAudioSessionGuid); } +static HRESULT ma_IAudioClient3_GetBufferSize(ma_IAudioClient3* pThis, ma_uint32* pNumBufferFrames) { return pThis->lpVtbl->GetBufferSize(pThis, pNumBufferFrames); } +static HRESULT ma_IAudioClient3_GetStreamLatency(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pLatency) { return pThis->lpVtbl->GetStreamLatency(pThis, pLatency); } +static HRESULT ma_IAudioClient3_GetCurrentPadding(ma_IAudioClient3* pThis, ma_uint32* pNumPaddingFrames) { return pThis->lpVtbl->GetCurrentPadding(pThis, pNumPaddingFrames); } +static HRESULT ma_IAudioClient3_IsFormatSupported(ma_IAudioClient3* pThis, MA_AUDCLNT_SHAREMODE shareMode, const WAVEFORMATEX* pFormat, WAVEFORMATEX** ppClosestMatch) { return pThis->lpVtbl->IsFormatSupported(pThis, shareMode, pFormat, ppClosestMatch); } +static HRESULT ma_IAudioClient3_GetMixFormat(ma_IAudioClient3* pThis, WAVEFORMATEX** ppDeviceFormat) { return pThis->lpVtbl->GetMixFormat(pThis, ppDeviceFormat); } +static HRESULT ma_IAudioClient3_GetDevicePeriod(ma_IAudioClient3* pThis, MA_REFERENCE_TIME* pDefaultDevicePeriod, MA_REFERENCE_TIME* pMinimumDevicePeriod) { return pThis->lpVtbl->GetDevicePeriod(pThis, pDefaultDevicePeriod, pMinimumDevicePeriod); } +static HRESULT ma_IAudioClient3_Start(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Start(pThis); } +static HRESULT ma_IAudioClient3_Stop(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Stop(pThis); } +static HRESULT ma_IAudioClient3_Reset(ma_IAudioClient3* pThis) { return pThis->lpVtbl->Reset(pThis); } +static HRESULT ma_IAudioClient3_SetEventHandle(ma_IAudioClient3* pThis, HANDLE eventHandle) { return pThis->lpVtbl->SetEventHandle(pThis, eventHandle); } +static HRESULT ma_IAudioClient3_GetService(ma_IAudioClient3* pThis, const IID* const riid, void** pp) { return pThis->lpVtbl->GetService(pThis, riid, pp); } +static HRESULT ma_IAudioClient3_IsOffloadCapable(ma_IAudioClient3* pThis, MA_AUDIO_STREAM_CATEGORY category, BOOL* pOffloadCapable) { return pThis->lpVtbl->IsOffloadCapable(pThis, category, pOffloadCapable); } +static HRESULT ma_IAudioClient3_SetClientProperties(ma_IAudioClient3* pThis, const ma_AudioClientProperties* pProperties) { return pThis->lpVtbl->SetClientProperties(pThis, pProperties); } +static HRESULT ma_IAudioClient3_GetBufferSizeLimits(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, BOOL eventDriven, MA_REFERENCE_TIME* pMinBufferDuration, MA_REFERENCE_TIME* pMaxBufferDuration) { return pThis->lpVtbl->GetBufferSizeLimits(pThis, pFormat, eventDriven, pMinBufferDuration, pMaxBufferDuration); } +static HRESULT ma_IAudioClient3_GetSharedModeEnginePeriod(ma_IAudioClient3* pThis, const WAVEFORMATEX* pFormat, UINT32* pDefaultPeriodInFrames, UINT32* pFundamentalPeriodInFrames, UINT32* pMinPeriodInFrames, UINT32* pMaxPeriodInFrames) { return pThis->lpVtbl->GetSharedModeEnginePeriod(pThis, pFormat, pDefaultPeriodInFrames, pFundamentalPeriodInFrames, pMinPeriodInFrames, pMaxPeriodInFrames); } +static HRESULT ma_IAudioClient3_GetCurrentSharedModeEnginePeriod(ma_IAudioClient3* pThis, WAVEFORMATEX** ppFormat, UINT32* pCurrentPeriodInFrames) { return pThis->lpVtbl->GetCurrentSharedModeEnginePeriod(pThis, ppFormat, pCurrentPeriodInFrames); } +static HRESULT 
ma_IAudioClient3_InitializeSharedAudioStream(ma_IAudioClient3* pThis, DWORD streamFlags, UINT32 periodInFrames, const WAVEFORMATEX* pFormat, const GUID* pAudioSessionGUID) { return pThis->lpVtbl->InitializeSharedAudioStream(pThis, streamFlags, periodInFrames, pFormat, pAudioSessionGUID); } /* IAudioRenderClient */ @@ -7488,11 +7918,11 @@ struct ma_IAudioRenderClient { ma_IAudioRenderClientVtbl* lpVtbl; }; -HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); } -HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); } +static HRESULT ma_IAudioRenderClient_QueryInterface(ma_IAudioRenderClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IAudioRenderClient_AddRef(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IAudioRenderClient_Release(ma_IAudioRenderClient* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IAudioRenderClient_GetBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesRequested, BYTE** ppData) { return pThis->lpVtbl->GetBuffer(pThis, numFramesRequested, ppData); } +static HRESULT ma_IAudioRenderClient_ReleaseBuffer(ma_IAudioRenderClient* pThis, ma_uint32 numFramesWritten, DWORD dwFlags) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesWritten, dwFlags); } /* IAudioCaptureClient */ @@ -7512,12 +7942,12 @@ struct ma_IAudioCaptureClient { ma_IAudioCaptureClientVtbl* lpVtbl; }; -HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); } -HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); } -HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); } +static HRESULT ma_IAudioCaptureClient_QueryInterface(ma_IAudioCaptureClient* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IAudioCaptureClient_AddRef(ma_IAudioCaptureClient* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IAudioCaptureClient_Release(ma_IAudioCaptureClient* 
pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IAudioCaptureClient_GetBuffer(ma_IAudioCaptureClient* pThis, BYTE** ppData, ma_uint32* pNumFramesToRead, DWORD* pFlags, ma_uint64* pDevicePosition, ma_uint64* pQPCPosition) { return pThis->lpVtbl->GetBuffer(pThis, ppData, pNumFramesToRead, pFlags, pDevicePosition, pQPCPosition); } +static HRESULT ma_IAudioCaptureClient_ReleaseBuffer(ma_IAudioCaptureClient* pThis, ma_uint32 numFramesRead) { return pThis->lpVtbl->ReleaseBuffer(pThis, numFramesRead); } +static HRESULT ma_IAudioCaptureClient_GetNextPacketSize(ma_IAudioCaptureClient* pThis, ma_uint32* pNumFramesInNextPacket) { return pThis->lpVtbl->GetNextPacketSize(pThis, pNumFramesInNextPacket); } #ifndef MA_WIN32_DESKTOP #include @@ -7540,7 +7970,7 @@ struct ma_completion_handler_uwp HANDLE hEvent; }; -HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject) +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion_handler_uwp* pThis, const IID* const riid, void** ppObject) { /* We need to "implement" IAgileObject which is just an indicator that's used internally by WASAPI for some multithreading management. To @@ -7557,12 +7987,12 @@ HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_QueryInterface(ma_completion return S_OK; } -ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis) +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_AddRef(ma_completion_handler_uwp* pThis) { return (ULONG)ma_atomic_increment_32(&pThis->counter); } -ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis) +static ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_uwp* pThis) { ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter); if (newRefCount == 0) { @@ -7572,7 +8002,7 @@ ULONG STDMETHODCALLTYPE ma_completion_handler_uwp_Release(ma_completion_handler_ return (ULONG)newRefCount; } -HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation) +static HRESULT STDMETHODCALLTYPE ma_completion_handler_uwp_ActivateCompleted(ma_completion_handler_uwp* pThis, ma_IActivateAudioInterfaceAsyncOperation* pActivateOperation) { (void)pActivateOperation; SetEvent(pThis->hEvent); @@ -7587,10 +8017,10 @@ static ma_completion_handler_uwp_vtbl g_maCompletionHandlerVtblInstance = { ma_completion_handler_uwp_ActivateCompleted }; -ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler) +static ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler) { - ma_assert(pHandler != NULL); - ma_zero_object(pHandler); + MA_ASSERT(pHandler != NULL); + MA_ZERO_OBJECT(pHandler); pHandler->lpVtbl = &g_maCompletionHandlerVtblInstance; pHandler->counter = 1; @@ -7602,14 +8032,14 @@ ma_result ma_completion_handler_uwp_init(ma_completion_handler_uwp* pHandler) return MA_SUCCESS; } -void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler) +static void ma_completion_handler_uwp_uninit(ma_completion_handler_uwp* pHandler) { if (pHandler->hEvent != NULL) { CloseHandle(pHandler->hEvent); } } -void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler) +static void ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler) { WaitForSingleObject(pHandler->hEvent, INFINITE); } @@ -7617,7 +8047,7 @@ void 
ma_completion_handler_uwp_wait(ma_completion_handler_uwp* pHandler) /* We need a virtual table for our notification client object that's used for detecting changes to the default device. */ #ifdef MA_WIN32_DESKTOP -HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotificationClient* pThis, const IID* const riid, void** ppObject) { /* We care about two interfaces - IUnknown and IMMNotificationClient. If the requested IID is something else @@ -7634,12 +8064,12 @@ HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_QueryInterface(ma_IMMNotifica return S_OK; } -ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis) +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_AddRef(ma_IMMNotificationClient* pThis) { return (ULONG)ma_atomic_increment_32(&pThis->counter); } -ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis) +static ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClient* pThis) { ma_uint32 newRefCount = ma_atomic_decrement_32(&pThis->counter); if (newRefCount == 0) { @@ -7650,7 +8080,7 @@ ULONG STDMETHODCALLTYPE ma_IMMNotificationClient_Release(ma_IMMNotificationClien } -HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, DWORD dwNewState) { #ifdef MA_DEBUG_OUTPUT printf("IMMNotificationClient_OnDeviceStateChanged(pDeviceID=%S, dwNewState=%u)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)", (unsigned int)dwNewState); @@ -7662,7 +8092,7 @@ HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceStateChanged(ma_IMMNo return S_OK; } -HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID) { #ifdef MA_DEBUG_OUTPUT printf("IMMNotificationClient_OnDeviceAdded(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)"); @@ -7674,7 +8104,7 @@ HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceAdded(ma_IMMNotificat return S_OK; } -HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID) { #ifdef MA_DEBUG_OUTPUT printf("IMMNotificationClient_OnDeviceRemoved(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)"); @@ -7686,7 +8116,7 @@ HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDeviceRemoved(ma_IMMNotific return S_OK; } -HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMMNotificationClient* pThis, ma_EDataFlow dataFlow, ma_ERole role, LPCWSTR pDefaultDeviceID) { #ifdef MA_DEBUG_OUTPUT printf("IMMNotificationClient_OnDefaultDeviceChanged(dataFlow=%d, role=%d, pDefaultDeviceID=%S)\n", dataFlow, role, (pDefaultDeviceID != NULL) ? 
pDefaultDeviceID : L"(NULL)"); @@ -7703,6 +8133,12 @@ HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMM return S_OK; } + /* Don't do automatic stream routing if we're not allowed. */ + if ((dataFlow == ma_eRender && pThis->pDevice->wasapi.allowPlaybackAutoStreamRouting == MA_FALSE) || + (dataFlow == ma_eCapture && pThis->pDevice->wasapi.allowCaptureAutoStreamRouting == MA_FALSE)) { + return S_OK; + } + /* Not currently supporting automatic stream routing in exclusive mode. This is not working correctly on my machine due to AUDCLNT_E_DEVICE_IN_USE errors when reinitializing the device. If this is a bug in miniaudio, we can try re-enabling this once @@ -7729,7 +8165,7 @@ HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnDefaultDeviceChanged(ma_IMM return S_OK; } -HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key) +static HRESULT STDMETHODCALLTYPE ma_IMMNotificationClient_OnPropertyValueChanged(ma_IMMNotificationClient* pThis, LPCWSTR pDeviceID, const PROPERTYKEY key) { #ifdef MA_DEBUG_OUTPUT printf("IMMNotificationClient_OnPropertyValueChanged(pDeviceID=%S)\n", (pDeviceID != NULL) ? pDeviceID : L"(NULL)"); @@ -7761,20 +8197,20 @@ typedef ma_IUnknown ma_WASAPIDeviceInterface; -ma_bool32 ma_context_is_device_id_equal__wasapi(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__wasapi(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return memcmp(pID0->wasapi, pID1->wasapi, sizeof(pID0->wasapi)) == 0; } -void ma_set_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_device_info* pInfo) +static void ma_set_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_device_info* pInfo) { - ma_assert(pWF != NULL); - ma_assert(pInfo != NULL); + MA_ASSERT(pWF != NULL); + MA_ASSERT(pInfo != NULL); pInfo->formatCount = 1; pInfo->formats[0] = ma_format_from_WAVEFORMATEX(pWF); @@ -7784,10 +8220,10 @@ void ma_set_device_info_from_WAVEFORMATEX(const WAVEFORMATEX* pWF, ma_device_inf pInfo->maxSampleRate = pWF->nSamplesPerSec; } -ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_share_mode shareMode, ma_device_info* pInfo) +static ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pContext, /*ma_IMMDevice**/void* pMMDevice, ma_IAudioClient* pAudioClient, ma_share_mode shareMode, ma_device_info* pInfo) { - ma_assert(pAudioClient != NULL); - ma_assert(pInfo != NULL); + MA_ASSERT(pAudioClient != NULL); + MA_ASSERT(pInfo != NULL); /* We use a different technique to retrieve the device information depending on whether or not we are using shared or exclusive mode. 
*/ if (shareMode == ma_share_mode_shared) { @@ -7846,7 +8282,7 @@ ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pCont ma_get_standard_channel_map(ma_standard_channel_map_microsoft, channels, defaultChannelMap); - ma_zero_object(&wf); + MA_ZERO_OBJECT(&wf); wf.Format.cbSize = sizeof(wf); wf.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; wf.Format.nChannels = (WORD)channels; @@ -7907,13 +8343,13 @@ ma_result ma_context_get_device_info_from_IAudioClient__wasapi(ma_context* pCont } #ifdef MA_WIN32_DESKTOP -ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice) +static ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IMMDevice** ppMMDevice) { ma_IMMDeviceEnumerator* pDeviceEnumerator; HRESULT hr; - ma_assert(pContext != NULL); - ma_assert(ppMMDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppMMDevice != NULL); hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); if (FAILED(hr)) { @@ -7934,14 +8370,14 @@ ma_result ma_context_get_MMDevice__wasapi(ma_context* pContext, ma_device_type d return MA_SUCCESS; } -ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_share_mode shareMode, ma_bool32 onlySimpleInfo, ma_device_info* pInfo) +static ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, ma_IMMDevice* pMMDevice, ma_share_mode shareMode, ma_bool32 onlySimpleInfo, ma_device_info* pInfo) { LPWSTR id; HRESULT hr; - ma_assert(pContext != NULL); - ma_assert(pMMDevice != NULL); - ma_assert(pInfo != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pMMDevice != NULL); + MA_ASSERT(pInfo != NULL); /* ID. */ hr = ma_IMMDevice_GetId(pMMDevice, &id); @@ -7949,11 +8385,11 @@ ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, size_t idlen = wcslen(id); if (idlen+1 > ma_countof(pInfo->id.wasapi)) { ma_CoTaskMemFree(pContext, id); - ma_assert(MA_FALSE); /* NOTE: If this is triggered, please report it. It means the format of the ID must haved change and is too long to fit in our fixed sized buffer. */ + MA_ASSERT(MA_FALSE); /* NOTE: If this is triggered, please report it. It means the format of the ID must haved change and is too long to fit in our fixed sized buffer. 
*/ return MA_ERROR; } - ma_copy_memory(pInfo->id.wasapi, id, idlen * sizeof(wchar_t)); + MA_COPY_MEMORY(pInfo->id.wasapi, id, idlen * sizeof(wchar_t)); pInfo->id.wasapi[idlen] = '\0'; ma_CoTaskMemFree(pContext, id); @@ -7994,14 +8430,14 @@ ma_result ma_context_get_device_info_from_MMDevice__wasapi(ma_context* pContext, return MA_SUCCESS; } -ma_result ma_context_enumerate_device_collection__wasapi(ma_context* pContext, ma_IMMDeviceCollection* pDeviceCollection, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_device_collection__wasapi(ma_context* pContext, ma_IMMDeviceCollection* pDeviceCollection, ma_device_type deviceType, ma_enum_devices_callback_proc callback, void* pUserData) { UINT deviceCount; HRESULT hr; ma_uint32 iDevice; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); hr = ma_IMMDeviceCollection_GetCount(pDeviceCollection, &deviceCount); if (FAILED(hr)) { @@ -8012,7 +8448,7 @@ ma_result ma_context_enumerate_device_collection__wasapi(ma_context* pContext, m ma_device_info deviceInfo; ma_IMMDevice* pMMDevice; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); hr = ma_IMMDeviceCollection_Item(pDeviceCollection, iDevice, &pMMDevice); if (SUCCEEDED(hr)) { @@ -8033,14 +8469,14 @@ ma_result ma_context_enumerate_device_collection__wasapi(ma_context* pContext, m #endif #ifdef MA_WIN32_DESKTOP -ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice) +static ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IMMDevice** ppMMDevice) { ma_result result; HRESULT hr; - ma_assert(pContext != NULL); - ma_assert(ppAudioClient != NULL); - ma_assert(ppMMDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); + MA_ASSERT(ppMMDevice != NULL); result = ma_context_get_MMDevice__wasapi(pContext, deviceType, pDeviceID, ppMMDevice); if (result != MA_SUCCESS) { @@ -8055,7 +8491,7 @@ ma_result ma_context_get_IAudioClient_Desktop__wasapi(ma_context* pContext, ma_d return MA_SUCCESS; } #else -ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface) +static ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_IUnknown** ppActivatedInterface) { ma_IActivateAudioInterfaceAsyncOperation *pAsyncOp = NULL; ma_completion_handler_uwp completionHandler; @@ -8066,11 +8502,11 @@ ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_devic HRESULT activateResult; ma_IUnknown* pActivatedInterface; - ma_assert(pContext != NULL); - ma_assert(ppAudioClient != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppAudioClient != NULL); if (pDeviceID != NULL) { - ma_copy_memory(&iid, pDeviceID->wasapi, sizeof(iid)); + MA_COPY_MEMORY(&iid, pDeviceID->wasapi, sizeof(iid)); } else { if (deviceType == ma_device_type_playback) { iid = MA_IID_DEVINTERFACE_AUDIO_RENDER; @@ -8134,7 +8570,7 @@ ma_result ma_context_get_IAudioClient_UWP__wasapi(ma_context* pContext, ma_devic } #endif -ma_result 
ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface) +static ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_IAudioClient** ppAudioClient, ma_WASAPIDeviceInterface** ppDeviceInterface) { #ifdef MA_WIN32_DESKTOP return ma_context_get_IAudioClient_Desktop__wasapi(pContext, deviceType, pDeviceID, ppAudioClient, ppDeviceInterface); @@ -8144,7 +8580,7 @@ ma_result ma_context_get_IAudioClient__wasapi(ma_context* pContext, ma_device_ty } -ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { /* Different enumeration for desktop and UWP. */ #ifdef MA_WIN32_DESKTOP @@ -8188,7 +8624,7 @@ ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_dev /* Playback. */ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); } @@ -8196,7 +8632,7 @@ ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_dev /* Capture. */ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); } @@ -8206,7 +8642,7 @@ ma_result ma_context_enumerate_devices__wasapi(ma_context* pContext, ma_enum_dev return MA_SUCCESS; } -ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { #ifdef MA_WIN32_DESKTOP ma_IMMDevice* pMMDevice = NULL; @@ -8249,9 +8685,9 @@ ma_result ma_context_get_device_info__wasapi(ma_context* pContext, ma_device_typ #endif } -void ma_device_uninit__wasapi(ma_device* pDevice) +static void ma_device_uninit__wasapi(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); #ifdef MA_WIN32_DESKTOP if (pDevice->wasapi.pDeviceEnumerator) { @@ -8300,6 +8736,7 @@ typedef struct ma_share_mode shareMode; ma_bool32 noAutoConvertSRC; ma_bool32 noDefaultQualitySRC; + ma_bool32 noHardwareOffloading; /* Output. 
*/ ma_IAudioClient* pAudioClient; @@ -8316,7 +8753,7 @@ typedef struct char deviceName[256]; } ma_device_init_internal_data__wasapi; -ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData) +static ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__wasapi* pData) { HRESULT hr; ma_result result = MA_SUCCESS; @@ -8330,8 +8767,8 @@ ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type d ma_IAudioClient2* pAudioClient2; ma_uint32 nativeSampleRate; - ma_assert(pContext != NULL); - ma_assert(pData != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pData != NULL); /* This function is only used to initialize one device type: either playback, capture or loopback. Never full-duplex. */ if (deviceType == ma_device_type_duplex) { @@ -8360,20 +8797,22 @@ ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type d /* Try enabling hardware offloading. */ - hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2); - if (SUCCEEDED(hr)) { - BOOL isHardwareOffloadingSupported = 0; - hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported); - if (SUCCEEDED(hr) && isHardwareOffloadingSupported) { - ma_AudioClientProperties clientProperties; - ma_zero_object(&clientProperties); - clientProperties.cbSize = sizeof(clientProperties); - clientProperties.bIsOffload = 1; - clientProperties.eCategory = MA_AudioCategory_Other; - ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties); - } + if (!pData->noHardwareOffloading) { + hr = ma_IAudioClient_QueryInterface(pData->pAudioClient, &MA_IID_IAudioClient2, (void**)&pAudioClient2); + if (SUCCEEDED(hr)) { + BOOL isHardwareOffloadingSupported = 0; + hr = ma_IAudioClient2_IsOffloadCapable(pAudioClient2, MA_AudioCategory_Other, &isHardwareOffloadingSupported); + if (SUCCEEDED(hr) && isHardwareOffloadingSupported) { + ma_AudioClientProperties clientProperties; + MA_ZERO_OBJECT(&clientProperties); + clientProperties.cbSize = sizeof(clientProperties); + clientProperties.bIsOffload = 1; + clientProperties.eCategory = MA_AudioCategory_Other; + ma_IAudioClient2_SetClientProperties(pAudioClient2, &clientProperties); + } - pAudioClient2->lpVtbl->Release(pAudioClient2); + pAudioClient2->lpVtbl->Release(pAudioClient2); + } } /* Here is where we try to determine the best format to use with the device. If the client if wanting exclusive mode, first try finding the best format for that. If this fails, fall back to shared mode. 
*/ @@ -8391,7 +8830,7 @@ ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type d WAVEFORMATEX* pActualFormat = (WAVEFORMATEX*)prop.blob.pBlobData; hr = ma_IAudioClient_IsFormatSupported((ma_IAudioClient*)pData->pAudioClient, MA_AUDCLNT_SHAREMODE_EXCLUSIVE, pActualFormat, NULL); if (SUCCEEDED(hr)) { - ma_copy_memory(&wf, pActualFormat, sizeof(WAVEFORMATEXTENSIBLE)); + MA_COPY_MEMORY(&wf, pActualFormat, sizeof(WAVEFORMATEXTENSIBLE)); } ma_PropVariantClear(pContext, &prop); @@ -8423,7 +8862,7 @@ ma_result ma_device_init_internal__wasapi(ma_context* pContext, ma_device_type d if (hr != S_OK) { result = MA_FORMAT_NOT_SUPPORTED; } else { - ma_copy_memory(&wf, pNativeFormat, sizeof(wf)); + MA_COPY_MEMORY(&wf, pNativeFormat, sizeof(wf)); result = MA_SUCCESS; } @@ -8701,12 +9140,12 @@ done: } } -ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType) +static ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType) { ma_device_init_internal_data__wasapi data; ma_result result; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* We only re-initialize the playback or capture device. Never a full-duplex device. */ if (deviceType == ma_device_type_duplex) { @@ -8716,7 +9155,7 @@ ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType if (deviceType == ma_device_type_playback) { data.formatIn = pDevice->playback.format; data.channelsIn = pDevice->playback.channels; - ma_copy_memory(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); data.shareMode = pDevice->playback.shareMode; data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; @@ -8724,7 +9163,7 @@ ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType } else { data.formatIn = pDevice->capture.format; data.channelsIn = pDevice->capture.channels; - ma_copy_memory(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); data.shareMode = pDevice->capture.shareMode; data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; @@ -8738,6 +9177,7 @@ ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType data.periodsIn = pDevice->wasapi.originalPeriods; data.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC; data.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pDevice->wasapi.noHardwareOffloading; result = ma_device_init_internal__wasapi(pDevice->pContext, deviceType, NULL, &data); if (result != MA_SUCCESS) { return result; @@ -8761,7 +9201,7 @@ ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType pDevice->capture.internalFormat = data.formatOut; pDevice->capture.internalChannels = data.channelsOut; pDevice->capture.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->capture.internalPeriods = data.periodsOut; 
ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName); @@ -8797,7 +9237,7 @@ ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType pDevice->playback.internalFormat = data.formatOut; pDevice->playback.internalChannels = data.channelsOut; pDevice->playback.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->playback.internalPeriods = data.periodsOut; ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName); @@ -8819,21 +9259,22 @@ ma_result ma_device_reinit__wasapi(ma_device* pDevice, ma_device_type deviceType return MA_SUCCESS; } -ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result = MA_SUCCESS; (void)pContext; - ma_assert(pContext != NULL); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDevice != NULL); - ma_zero_object(&pDevice->wasapi); + MA_ZERO_OBJECT(&pDevice->wasapi); pDevice->wasapi.originalBufferSizeInFrames = pConfig->bufferSizeInFrames; pDevice->wasapi.originalBufferSizeInMilliseconds = pConfig->bufferSizeInMilliseconds; pDevice->wasapi.originalPeriods = pConfig->periods; - pDevice->wasapi.noAutoConvertSRC = pDevice->wasapi.noAutoConvertSRC; - pDevice->wasapi.noDefaultQualitySRC = pDevice->wasapi.noDefaultQualitySRC; + pDevice->wasapi.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; + pDevice->wasapi.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + pDevice->wasapi.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; /* Exclusive mode is not allowed with loopback. */ if (pConfig->deviceType == ma_device_type_loopback && pConfig->playback.shareMode == ma_share_mode_exclusive) { @@ -8845,7 +9286,7 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p data.formatIn = pConfig->capture.format; data.channelsIn = pConfig->capture.channels; data.sampleRateIn = pConfig->sampleRate; - ma_copy_memory(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap)); data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; @@ -8856,6 +9297,7 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p data.periodsIn = pConfig->periods; data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; result = ma_device_init_internal__wasapi(pDevice->pContext, (pConfig->deviceType == ma_device_type_loopback) ? 
ma_device_type_loopback : ma_device_type_capture, pConfig->capture.pDeviceID, &data); if (result != MA_SUCCESS) { @@ -8868,7 +9310,7 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p pDevice->capture.internalFormat = data.formatOut; pDevice->capture.internalChannels = data.channelsOut; pDevice->capture.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->capture.internalPeriods = data.periodsOut; ma_strcpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), data.deviceName); @@ -8901,7 +9343,7 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p data.formatIn = pConfig->playback.format; data.channelsIn = pConfig->playback.channels; data.sampleRateIn = pConfig->sampleRate; - ma_copy_memory(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap)); data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; @@ -8912,6 +9354,7 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p data.periodsIn = pConfig->periods; data.noAutoConvertSRC = pConfig->wasapi.noAutoConvertSRC; data.noDefaultQualitySRC = pConfig->wasapi.noDefaultQualitySRC; + data.noHardwareOffloading = pConfig->wasapi.noHardwareOffloading; result = ma_device_init_internal__wasapi(pDevice->pContext, ma_device_type_playback, pConfig->playback.pDeviceID, &data); if (result != MA_SUCCESS) { @@ -8937,7 +9380,7 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p pDevice->playback.internalFormat = data.formatOut; pDevice->playback.internalChannels = data.channelsOut; pDevice->playback.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->playback.internalPeriods = data.periodsOut; ma_strcpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), data.deviceName); @@ -8987,24 +9430,33 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p registering a IMMNotificationClient with it. We only care about this if it's the default device. 
*/ #ifdef MA_WIN32_DESKTOP - { - ma_IMMDeviceEnumerator* pDeviceEnumerator; - HRESULT hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); - if (FAILED(hr)) { - ma_device_uninit__wasapi(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + if (pConfig->wasapi.noAutoStreamRouting == MA_FALSE) { + if ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.pDeviceID == NULL) { + pDevice->wasapi.allowCaptureAutoStreamRouting = MA_TRUE; + } + if ((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.pDeviceID == NULL) { + pDevice->wasapi.allowPlaybackAutoStreamRouting = MA_TRUE; } - pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl; - pDevice->wasapi.notificationClient.counter = 1; - pDevice->wasapi.notificationClient.pDevice = pDevice; + if (pDevice->wasapi.allowCaptureAutoStreamRouting || pDevice->wasapi.allowPlaybackAutoStreamRouting) { + ma_IMMDeviceEnumerator* pDeviceEnumerator; + HRESULT hr = ma_CoCreateInstance(pContext, MA_CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, MA_IID_IMMDeviceEnumerator, (void**)&pDeviceEnumerator); + if (FAILED(hr)) { + ma_device_uninit__wasapi(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to create device enumerator.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient); - if (SUCCEEDED(hr)) { - pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator; - } else { - /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. */ - ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + pDevice->wasapi.notificationClient.lpVtbl = (void*)&g_maNotificationCientVtbl; + pDevice->wasapi.notificationClient.counter = 1; + pDevice->wasapi.notificationClient.pDevice = pDevice; + + hr = pDeviceEnumerator->lpVtbl->RegisterEndpointNotificationCallback(pDeviceEnumerator, &pDevice->wasapi.notificationClient); + if (SUCCEEDED(hr)) { + pDevice->wasapi.pDeviceEnumerator = (ma_ptr)pDeviceEnumerator; + } else { + /* Not the end of the world if we fail to register the notification callback. We just won't support automatic stream routing. 
*/ + ma_IMMDeviceEnumerator_Release(pDeviceEnumerator); + } } } #endif @@ -9015,14 +9467,14 @@ ma_result ma_device_init__wasapi(ma_context* pContext, const ma_device_config* p return MA_SUCCESS; } -ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount) +static ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioClient* pAudioClient, ma_uint32* pFrameCount) { ma_uint32 paddingFramesCount; HRESULT hr; ma_share_mode shareMode; - ma_assert(pDevice != NULL); - ma_assert(pFrameCount != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrameCount != NULL); *pFrameCount = 0; @@ -9050,9 +9502,9 @@ ma_result ma_device__get_available_frames__wasapi(ma_device* pDevice, ma_IAudioC return MA_SUCCESS; } -ma_bool32 ma_device_is_reroute_required__wasapi(ma_device* pDevice, ma_device_type deviceType) +static ma_bool32 ma_device_is_reroute_required__wasapi(ma_device* pDevice, ma_device_type deviceType) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (deviceType == ma_device_type_playback) { return pDevice->wasapi.hasDefaultPlaybackDeviceChanged; @@ -9065,7 +9517,7 @@ ma_bool32 ma_device_is_reroute_required__wasapi(ma_device* pDevice, ma_device_ty return MA_FALSE; } -ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType) +static ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceType) { ma_result result; @@ -9096,9 +9548,9 @@ ma_result ma_device_reroute__wasapi(ma_device* pDevice, ma_device_type deviceTyp } -ma_result ma_device_stop__wasapi(ma_device* pDevice) +static ma_result ma_device_stop__wasapi(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* We need to explicitly signal the capture event in loopback mode to ensure we return from WaitForSingleObject() when nothing is being played. 
When nothing @@ -9112,27 +9564,31 @@ ma_result ma_device_stop__wasapi(ma_device* pDevice) } -ma_result ma_device_main_loop__wasapi(ma_device* pDevice) +static ma_result ma_device_main_loop__wasapi(ma_device* pDevice) { ma_result result; HRESULT hr; ma_bool32 exitLoop = MA_FALSE; ma_uint32 framesWrittenToPlaybackDevice = 0; - ma_uint32 mappedBufferSizeInFramesCapture = 0; - ma_uint32 mappedBufferSizeInFramesPlayback = 0; - ma_uint32 mappedBufferFramesRemainingCapture = 0; - ma_uint32 mappedBufferFramesRemainingPlayback = 0; - BYTE* pMappedBufferCapture = NULL; - BYTE* pMappedBufferPlayback = NULL; - ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - ma_uint8 inputDataInExternalFormat[4096]; - ma_uint32 inputDataInExternalFormatCap = sizeof(inputDataInExternalFormat) / bpfCapture; - ma_uint8 outputDataInExternalFormat[4096]; - ma_uint32 outputDataInExternalFormatCap = sizeof(outputDataInExternalFormat) / bpfPlayback; + ma_uint32 mappedDeviceBufferSizeInFramesCapture = 0; + ma_uint32 mappedDeviceBufferSizeInFramesPlayback = 0; + ma_uint32 mappedDeviceBufferFramesRemainingCapture = 0; + ma_uint32 mappedDeviceBufferFramesRemainingPlayback = 0; + BYTE* pMappedDeviceBufferCapture = NULL; + BYTE* pMappedDeviceBufferPlayback = NULL; + ma_uint32 bpfCaptureDevice = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 bpfPlaybackDevice = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 bpfCaptureClient = ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 bpfPlaybackClient = ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint8 inputDataInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 inputDataInClientFormatCap = sizeof(inputDataInClientFormat) / bpfCaptureClient; + ma_uint8 outputDataInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 outputDataInClientFormatCap = sizeof(outputDataInClientFormat) / bpfPlaybackClient; + ma_uint32 outputDataInClientFormatCount = 0; + ma_uint32 outputDataInClientFormatConsumed = 0; ma_uint32 periodSizeInFramesCapture = 0; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* The capture device needs to be started immediately. */ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { @@ -9171,7 +9627,7 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) DWORD flagsCapture; /* Passed to IAudioCaptureClient_GetBuffer(). */ /* The process is to map the playback buffer and fill it as quickly as possible from input data. */ - if (pMappedBufferPlayback == NULL) { + if (pMappedDeviceBufferPlayback == NULL) { /* WASAPI is weird with exclusive mode. You need to wait on the event _before_ querying the available frames. */ if (pDevice->playback.shareMode == ma_share_mode_exclusive) { if (WaitForSingleObject(pDevice->wasapi.hEventPlayback, INFINITE) == WAIT_FAILED) { @@ -9207,21 +9663,21 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /* We're ready to map the playback device's buffer. We don't release this until it's been entirely filled. 
*/ - hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedBufferPlayback); + hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedDeviceBufferPlayback); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); exitLoop = MA_TRUE; break; } - mappedBufferSizeInFramesPlayback = framesAvailablePlayback; - mappedBufferFramesRemainingPlayback = framesAvailablePlayback; + mappedDeviceBufferSizeInFramesPlayback = framesAvailablePlayback; + mappedDeviceBufferFramesRemainingPlayback = framesAvailablePlayback; } /* At this point we should have a buffer available for output. We need to keep writing input samples to it. */ for (;;) { /* Try grabbing some captured data if we haven't already got a mapped buffer. */ - if (pMappedBufferCapture == NULL) { + if (pMappedDeviceBufferCapture == NULL) { if (pDevice->capture.shareMode == ma_share_mode_shared) { if (WaitForSingleObject(pDevice->wasapi.hEventCapture, INFINITE) == WAIT_FAILED) { return MA_ERROR; /* Wait failed. */ @@ -9249,8 +9705,8 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /* Getting here means there's data available for writing to the output device. */ - mappedBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); - hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedBufferCapture, &mappedBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); exitLoop = MA_TRUE; @@ -9262,7 +9718,7 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY) != 0) { /* Glitched. Probably due to an overrun. */ #ifdef MA_DEBUG_OUTPUT - printf("[WASAPI] Data discontinuity (possible overrun). framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedBufferSizeInFramesCapture); + printf("[WASAPI] Data discontinuity (possible overrun). 
framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture); #endif /* @@ -9276,28 +9732,28 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) #endif do { - hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture); + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); if (FAILED(hr)) { break; } - framesAvailableCapture -= mappedBufferSizeInFramesCapture; + framesAvailableCapture -= mappedDeviceBufferSizeInFramesCapture; if (framesAvailableCapture > 0) { - mappedBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); - hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedBufferCapture, &mappedBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + mappedDeviceBufferSizeInFramesCapture = ma_min(framesAvailableCapture, periodSizeInFramesCapture); + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); exitLoop = MA_TRUE; break; } } else { - pMappedBufferCapture = NULL; - mappedBufferSizeInFramesCapture = 0; + pMappedDeviceBufferCapture = NULL; + mappedDeviceBufferSizeInFramesCapture = 0; } } while (framesAvailableCapture > periodSizeInFramesCapture); #ifdef MA_DEBUG_OUTPUT - printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedBufferSizeInFramesCapture); + printf("framesAvailableCapture=%d, mappedBufferSizeInFramesCapture=%d\n", framesAvailableCapture, mappedDeviceBufferSizeInFramesCapture); #endif } } else { @@ -9308,35 +9764,36 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) #endif } - mappedBufferFramesRemainingCapture = mappedBufferSizeInFramesCapture; - - pDevice->capture._dspFrameCount = mappedBufferSizeInFramesCapture; - if ((flagsCapture & MA_AUDCLNT_BUFFERFLAGS_SILENT) == 0) { - pDevice->capture._dspFrames = (const ma_uint8*)pMappedBufferCapture; - } else { - pDevice->capture._dspFrames = NULL; - } + mappedDeviceBufferFramesRemainingCapture = mappedDeviceBufferSizeInFramesCapture; } /* At this point we should have both input and output data available. We now need to convert the data and post it to the client. 
*/ for (;;) { - BYTE* pRunningBufferCapture; - BYTE* pRunningBufferPlayback; + BYTE* pRunningDeviceBufferCapture; + BYTE* pRunningDeviceBufferPlayback; ma_uint32 framesToProcess; ma_uint32 framesProcessed; - pRunningBufferCapture = pMappedBufferCapture + ((mappedBufferSizeInFramesCapture - mappedBufferFramesRemainingCapture ) * bpfPlayback); - pRunningBufferPlayback = pMappedBufferPlayback + ((mappedBufferSizeInFramesPlayback - mappedBufferFramesRemainingPlayback) * bpfPlayback); + pRunningDeviceBufferCapture = pMappedDeviceBufferCapture + ((mappedDeviceBufferSizeInFramesCapture - mappedDeviceBufferFramesRemainingCapture ) * bpfCaptureDevice); + pRunningDeviceBufferPlayback = pMappedDeviceBufferPlayback + ((mappedDeviceBufferSizeInFramesPlayback - mappedDeviceBufferFramesRemainingPlayback) * bpfPlaybackDevice); /* There may be some data sitting in the converter that needs to be processed first. Once this is exhaused, run the data callback again. */ - if (!pDevice->playback.converter.isPassthrough) { - framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, pRunningBufferPlayback, mappedBufferFramesRemainingPlayback); - if (framesProcessed > 0) { - mappedBufferFramesRemainingPlayback -= framesProcessed; - if (mappedBufferFramesRemainingPlayback == 0) { - break; - } + if (!pDevice->playback.converter.isPassthrough && outputDataInClientFormatConsumed < outputDataInClientFormatCount) { + ma_uint64 convertedFrameCountClient = (outputDataInClientFormatCount - outputDataInClientFormatConsumed); + ma_uint64 convertedFrameCountDevice = mappedDeviceBufferFramesRemainingPlayback; + void* pConvertedFramesClient = outputDataInClientFormat + (outputDataInClientFormatConsumed * bpfPlaybackClient); + void* pConvertedFramesDevice = pRunningDeviceBufferPlayback; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesClient, &convertedFrameCountClient, pConvertedFramesDevice, &convertedFrameCountDevice); + if (result != MA_SUCCESS) { + break; + } + + outputDataInClientFormatConsumed += (ma_uint32)convertedFrameCountClient; /* Safe cast. */ + mappedDeviceBufferFramesRemainingPlayback -= (ma_uint32)convertedFrameCountDevice; /* Safe cast. */ + + if (mappedDeviceBufferFramesRemainingPlayback == 0) { + break; } } @@ -9346,81 +9803,76 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) */ if (pDevice->capture.converter.isPassthrough && pDevice->playback.converter.isPassthrough) { /* Optimal path. We can pass mapped pointers directly to the callback. */ - framesToProcess = ma_min(mappedBufferFramesRemainingCapture, mappedBufferFramesRemainingPlayback); + framesToProcess = ma_min(mappedDeviceBufferFramesRemainingCapture, mappedDeviceBufferFramesRemainingPlayback); framesProcessed = framesToProcess; - ma_device__on_data(pDevice, pRunningBufferPlayback, pRunningBufferCapture, framesToProcess); + ma_device__on_data(pDevice, pRunningDeviceBufferPlayback, pRunningDeviceBufferCapture, framesToProcess); - mappedBufferFramesRemainingCapture -= framesProcessed; - mappedBufferFramesRemainingPlayback -= framesProcessed; + mappedDeviceBufferFramesRemainingCapture -= framesProcessed; + mappedDeviceBufferFramesRemainingPlayback -= framesProcessed; - if (mappedBufferFramesRemainingCapture == 0) { + if (mappedDeviceBufferFramesRemainingCapture == 0) { break; /* Exhausted input data. */ } - if (mappedBufferFramesRemainingPlayback == 0) { + if (mappedDeviceBufferFramesRemainingPlayback == 0) { break; /* Exhausted output data. 
*/ } } else if (pDevice->capture.converter.isPassthrough) { /* The input buffer is a passthrough, but the playback buffer requires a conversion. */ - framesToProcess = ma_min(mappedBufferFramesRemainingCapture, outputDataInExternalFormatCap); + framesToProcess = ma_min(mappedDeviceBufferFramesRemainingCapture, outputDataInClientFormatCap); framesProcessed = framesToProcess; - ma_device__on_data(pDevice, outputDataInExternalFormat, pRunningBufferCapture, framesToProcess); - mappedBufferFramesRemainingCapture -= framesProcessed; + ma_device__on_data(pDevice, outputDataInClientFormat, pRunningDeviceBufferCapture, framesToProcess); + outputDataInClientFormatCount = framesProcessed; + outputDataInClientFormatConsumed = 0; - pDevice->playback._dspFrameCount = framesProcessed; - pDevice->playback._dspFrames = (const ma_uint8*)outputDataInExternalFormat; - - if (mappedBufferFramesRemainingCapture == 0) { + mappedDeviceBufferFramesRemainingCapture -= framesProcessed; + if (mappedDeviceBufferFramesRemainingCapture == 0) { break; /* Exhausted input data. */ } } else if (pDevice->playback.converter.isPassthrough) { /* The input buffer requires conversion, the playback buffer is passthrough. */ - framesToProcess = ma_min(inputDataInExternalFormatCap, mappedBufferFramesRemainingPlayback); - framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, inputDataInExternalFormat, framesToProcess); - if (framesProcessed == 0) { - /* Getting here means we've run out of input data. */ - mappedBufferFramesRemainingCapture = 0; + ma_uint64 capturedDeviceFramesToProcess = mappedDeviceBufferFramesRemainingCapture; + ma_uint64 capturedClientFramesToProcess = ma_min(inputDataInClientFormatCap, mappedDeviceBufferFramesRemainingPlayback); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningDeviceBufferCapture, &capturedDeviceFramesToProcess, inputDataInClientFormat, &capturedClientFramesToProcess); + if (result != MA_SUCCESS) { break; } - ma_device__on_data(pDevice, pRunningBufferPlayback, inputDataInExternalFormat, framesProcessed); - mappedBufferFramesRemainingPlayback -= framesProcessed; - - if (framesProcessed < framesToProcess) { - mappedBufferFramesRemainingCapture = 0; - break; /* Exhausted input data. */ + if (capturedClientFramesToProcess == 0) { + break; } - if (mappedBufferFramesRemainingPlayback == 0) { - break; /* Exhausted output data. */ - } + ma_device__on_data(pDevice, pRunningDeviceBufferPlayback, inputDataInClientFormat, (ma_uint32)capturedClientFramesToProcess); /* Safe cast. */ + + mappedDeviceBufferFramesRemainingCapture -= (ma_uint32)capturedDeviceFramesToProcess; + mappedDeviceBufferFramesRemainingPlayback -= (ma_uint32)capturedClientFramesToProcess; } else { - framesToProcess = ma_min(inputDataInExternalFormatCap, outputDataInExternalFormatCap); - framesProcessed = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, inputDataInExternalFormat, framesToProcess); - if (framesProcessed == 0) { - /* Getting here means we've run out of input data. 
*/ - mappedBufferFramesRemainingCapture = 0; + ma_uint64 capturedDeviceFramesToProcess = mappedDeviceBufferFramesRemainingCapture; + ma_uint64 capturedClientFramesToProcess = ma_min(inputDataInClientFormatCap, outputDataInClientFormatCap); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningDeviceBufferCapture, &capturedDeviceFramesToProcess, inputDataInClientFormat, &capturedClientFramesToProcess); + if (result != MA_SUCCESS) { break; } - ma_device__on_data(pDevice, outputDataInExternalFormat, inputDataInExternalFormat, framesProcessed); - - pDevice->playback._dspFrameCount = framesProcessed; - pDevice->playback._dspFrames = (const ma_uint8*)outputDataInExternalFormat; - - if (framesProcessed < framesToProcess) { - /* Getting here means we've run out of input data. */ - mappedBufferFramesRemainingCapture = 0; + if (capturedClientFramesToProcess == 0) { break; } + + ma_device__on_data(pDevice, outputDataInClientFormat, inputDataInClientFormat, (ma_uint32)capturedClientFramesToProcess); + + mappedDeviceBufferFramesRemainingCapture -= (ma_uint32)capturedDeviceFramesToProcess; + outputDataInClientFormatCount = (ma_uint32)capturedClientFramesToProcess; + outputDataInClientFormatConsumed = 0; } } /* If at this point we've run out of capture data we need to release the buffer. */ - if (mappedBufferFramesRemainingCapture == 0 && pMappedBufferCapture != NULL) { - hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture); + if (mappedDeviceBufferFramesRemainingCapture == 0 && pMappedDeviceBufferCapture != NULL) { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); exitLoop = MA_TRUE; @@ -9429,21 +9881,21 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) /*printf("TRACE: Released capture buffer\n");*/ - pMappedBufferCapture = NULL; - mappedBufferFramesRemainingCapture = 0; - mappedBufferSizeInFramesCapture = 0; + pMappedDeviceBufferCapture = NULL; + mappedDeviceBufferFramesRemainingCapture = 0; + mappedDeviceBufferSizeInFramesCapture = 0; } /* Get out of this loop if we're run out of room in the playback buffer. */ - if (mappedBufferFramesRemainingPlayback == 0) { + if (mappedDeviceBufferFramesRemainingPlayback == 0) { break; } } /* If at this point we've run out of data we need to release the buffer. 
*/ - if (mappedBufferFramesRemainingPlayback == 0 && pMappedBufferPlayback != NULL) { - hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedBufferSizeInFramesPlayback, 0); + if (mappedDeviceBufferFramesRemainingPlayback == 0 && pMappedDeviceBufferPlayback != NULL) { + hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedDeviceBufferSizeInFramesPlayback, 0); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); exitLoop = MA_TRUE; @@ -9451,11 +9903,11 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /*printf("TRACE: Released playback buffer\n");*/ - framesWrittenToPlaybackDevice += mappedBufferSizeInFramesPlayback; + framesWrittenToPlaybackDevice += mappedDeviceBufferSizeInFramesPlayback; - pMappedBufferPlayback = NULL; - mappedBufferFramesRemainingPlayback = 0; - mappedBufferSizeInFramesPlayback = 0; + pMappedDeviceBufferPlayback = NULL; + mappedDeviceBufferFramesRemainingPlayback = 0; + mappedDeviceBufferSizeInFramesPlayback = 0; } if (!pDevice->wasapi.isStartedPlayback) { @@ -9504,8 +9956,8 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /* Map the data buffer in preparation for sending to the client. */ - mappedBufferSizeInFramesCapture = framesAvailableCapture; - hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedBufferCapture, &mappedBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); + mappedDeviceBufferSizeInFramesCapture = framesAvailableCapture; + hr = ma_IAudioCaptureClient_GetBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, (BYTE**)&pMappedDeviceBufferCapture, &mappedDeviceBufferSizeInFramesCapture, &flagsCapture, NULL, NULL); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); exitLoop = MA_TRUE; @@ -9513,12 +9965,12 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /* We should have a buffer at this point. */ - ma_device__send_frames_to_client(pDevice, mappedBufferSizeInFramesCapture, pMappedBufferCapture); + ma_device__send_frames_to_client(pDevice, mappedDeviceBufferSizeInFramesCapture, pMappedDeviceBufferCapture); /* At this point we're done with the buffer. */ - hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture); - pMappedBufferCapture = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */ - mappedBufferSizeInFramesCapture = 0; + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); + pMappedDeviceBufferCapture = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. 
*/ + mappedDeviceBufferSizeInFramesCapture = 0; if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); exitLoop = MA_TRUE; @@ -9550,7 +10002,7 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /* Map a the data buffer in preparation for the callback. */ - hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedBufferPlayback); + hr = ma_IAudioRenderClient_GetBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, &pMappedDeviceBufferPlayback); if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to retrieve internal buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); exitLoop = MA_TRUE; @@ -9558,12 +10010,12 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) } /* We should have a buffer at this point. */ - ma_device__read_frames_from_client(pDevice, framesAvailablePlayback, pMappedBufferPlayback); + ma_device__read_frames_from_client(pDevice, framesAvailablePlayback, pMappedDeviceBufferPlayback); /* At this point we're done writing to the device and we just need to release the buffer. */ hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, framesAvailablePlayback, 0); - pMappedBufferPlayback = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */ - mappedBufferSizeInFramesPlayback = 0; + pMappedDeviceBufferPlayback = NULL; /* <-- Important. Not doing this can result in an error once we leave this loop because it will use this to know whether or not a final ReleaseBuffer() needs to be called. */ + mappedDeviceBufferSizeInFramesPlayback = 0; if (FAILED(hr)) { ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[WASAPI] Failed to release internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); @@ -9592,8 +10044,8 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) /* Here is where the device needs to be stopped. */ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { /* Any mapped buffers need to be released. */ - if (pMappedBufferCapture != NULL) { - hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedBufferSizeInFramesCapture); + if (pMappedDeviceBufferCapture != NULL) { + hr = ma_IAudioCaptureClient_ReleaseBuffer((ma_IAudioCaptureClient*)pDevice->wasapi.pCaptureClient, mappedDeviceBufferSizeInFramesCapture); } hr = ma_IAudioClient_Stop((ma_IAudioClient*)pDevice->wasapi.pAudioClientCapture); @@ -9612,8 +10064,8 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { /* Any mapped buffers need to be released. 
*/ - if (pMappedBufferPlayback != NULL) { - hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedBufferSizeInFramesPlayback, 0); + if (pMappedDeviceBufferPlayback != NULL) { + hr = ma_IAudioRenderClient_ReleaseBuffer((ma_IAudioRenderClient*)pDevice->wasapi.pRenderClient, mappedDeviceBufferSizeInFramesPlayback, 0); } /* @@ -9668,20 +10120,20 @@ ma_result ma_device_main_loop__wasapi(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_context_uninit__wasapi(ma_context* pContext) +static ma_result ma_context_uninit__wasapi(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_wasapi); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_wasapi); (void)pContext; return MA_SUCCESS; } -ma_result ma_context_init__wasapi(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__wasapi(const ma_context_config* pConfig, ma_context* pContext) { ma_result result = MA_SUCCESS; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)pConfig; @@ -9710,7 +10162,7 @@ ma_result ma_context_init__wasapi(const ma_context_config* pConfig, ma_context* return MA_NO_BACKEND; } - ma_zero_object(&osvi); + MA_ZERO_OBJECT(&osvi); osvi.dwOSVersionInfoSize = sizeof(osvi); osvi.dwMajorVersion = HIBYTE(MA_WIN32_WINNT_VISTA); osvi.dwMinorVersion = LOBYTE(MA_WIN32_WINNT_VISTA); @@ -9751,7 +10203,7 @@ DirectSound Backend #ifdef MA_HAS_DSOUND /*#include */ -GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}}; +static const GUID MA_GUID_IID_DirectSoundNotify = {0xb0210783, 0x89cd, 0x11d0, {0xaf, 0x08, 0x00, 0xa0, 0xc9, 0x25, 0xcd, 0x16}}; /* miniaudio only uses priority or exclusive modes. 
*/ #define MA_DSSCL_NORMAL 1 @@ -9913,17 +10365,17 @@ struct ma_IDirectSound { ma_IDirectSoundVtbl* lpVtbl; }; -HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); } -HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); } -HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); } -HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); } -HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); } -HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); } -HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); } -HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } +static HRESULT ma_IDirectSound_QueryInterface(ma_IDirectSound* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IDirectSound_AddRef(ma_IDirectSound* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IDirectSound_Release(ma_IDirectSound* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IDirectSound_CreateSoundBuffer(ma_IDirectSound* pThis, const MA_DSBUFFERDESC* pDSBufferDesc, ma_IDirectSoundBuffer** ppDSBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateSoundBuffer(pThis, pDSBufferDesc, ppDSBuffer, pUnkOuter); } +static HRESULT ma_IDirectSound_GetCaps(ma_IDirectSound* pThis, MA_DSCAPS* pDSCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCaps); } +static HRESULT ma_IDirectSound_DuplicateSoundBuffer(ma_IDirectSound* pThis, ma_IDirectSoundBuffer* pDSBufferOriginal, ma_IDirectSoundBuffer** ppDSBufferDuplicate) { return pThis->lpVtbl->DuplicateSoundBuffer(pThis, pDSBufferOriginal, ppDSBufferDuplicate); } +static HRESULT ma_IDirectSound_SetCooperativeLevel(ma_IDirectSound* pThis, HWND hwnd, DWORD dwLevel) { return pThis->lpVtbl->SetCooperativeLevel(pThis, hwnd, dwLevel); } +static HRESULT ma_IDirectSound_Compact(ma_IDirectSound* pThis) { return pThis->lpVtbl->Compact(pThis); } +static HRESULT ma_IDirectSound_GetSpeakerConfig(ma_IDirectSound* pThis, DWORD* pSpeakerConfig) { return pThis->lpVtbl->GetSpeakerConfig(pThis, pSpeakerConfig); } +static HRESULT ma_IDirectSound_SetSpeakerConfig(ma_IDirectSound* pThis, DWORD dwSpeakerConfig) { return pThis->lpVtbl->SetSpeakerConfig(pThis, dwSpeakerConfig); } +static HRESULT ma_IDirectSound_Initialize(ma_IDirectSound* pThis, const GUID* pGuidDevice) { return 
pThis->lpVtbl->Initialize(pThis, pGuidDevice); } /* IDirectSoundBuffer */ @@ -9958,27 +10410,27 @@ struct ma_IDirectSoundBuffer { ma_IDirectSoundBufferVtbl* lpVtbl; }; -HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); } -HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); } -HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } -HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); } -HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); } -HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, DWORD* pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); } -HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } -HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); } -HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } -HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); } -HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); } -HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); } -HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); } -HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); } -HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); } -HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } -HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } -HRESULT 
ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); } +static HRESULT ma_IDirectSoundBuffer_QueryInterface(ma_IDirectSoundBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IDirectSoundBuffer_AddRef(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IDirectSoundBuffer_Release(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IDirectSoundBuffer_GetCaps(ma_IDirectSoundBuffer* pThis, MA_DSBCAPS* pDSBufferCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSBufferCaps); } +static HRESULT ma_IDirectSoundBuffer_GetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD* pCurrentPlayCursor, DWORD* pCurrentWriteCursor) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCurrentPlayCursor, pCurrentWriteCursor); } +static HRESULT ma_IDirectSoundBuffer_GetFormat(ma_IDirectSoundBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static HRESULT ma_IDirectSoundBuffer_GetVolume(ma_IDirectSoundBuffer* pThis, LONG* pVolume) { return pThis->lpVtbl->GetVolume(pThis, pVolume); } +static HRESULT ma_IDirectSoundBuffer_GetPan(ma_IDirectSoundBuffer* pThis, LONG* pPan) { return pThis->lpVtbl->GetPan(pThis, pPan); } +static HRESULT ma_IDirectSoundBuffer_GetFrequency(ma_IDirectSoundBuffer* pThis, DWORD* pFrequency) { return pThis->lpVtbl->GetFrequency(pThis, pFrequency); } +static HRESULT ma_IDirectSoundBuffer_GetStatus(ma_IDirectSoundBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static HRESULT ma_IDirectSoundBuffer_Initialize(ma_IDirectSoundBuffer* pThis, ma_IDirectSound* pDirectSound, const MA_DSBUFFERDESC* pDSBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSound, pDSBufferDesc); } +static HRESULT ma_IDirectSoundBuffer_Lock(ma_IDirectSoundBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static HRESULT ma_IDirectSoundBuffer_Play(ma_IDirectSoundBuffer* pThis, DWORD dwReserved1, DWORD dwPriority, DWORD dwFlags) { return pThis->lpVtbl->Play(pThis, dwReserved1, dwPriority, dwFlags); } +static HRESULT ma_IDirectSoundBuffer_SetCurrentPosition(ma_IDirectSoundBuffer* pThis, DWORD dwNewPosition) { return pThis->lpVtbl->SetCurrentPosition(pThis, dwNewPosition); } +static HRESULT ma_IDirectSoundBuffer_SetFormat(ma_IDirectSoundBuffer* pThis, const WAVEFORMATEX* pFormat) { return pThis->lpVtbl->SetFormat(pThis, pFormat); } +static HRESULT ma_IDirectSoundBuffer_SetVolume(ma_IDirectSoundBuffer* pThis, LONG volume) { return pThis->lpVtbl->SetVolume(pThis, volume); } +static HRESULT ma_IDirectSoundBuffer_SetPan(ma_IDirectSoundBuffer* pThis, LONG pan) { return pThis->lpVtbl->SetPan(pThis, pan); } +static HRESULT ma_IDirectSoundBuffer_SetFrequency(ma_IDirectSoundBuffer* pThis, DWORD dwFrequency) { return pThis->lpVtbl->SetFrequency(pThis, dwFrequency); } +static HRESULT ma_IDirectSoundBuffer_Stop(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static HRESULT ma_IDirectSoundBuffer_Unlock(ma_IDirectSoundBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, 
pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } +static HRESULT ma_IDirectSoundBuffer_Restore(ma_IDirectSoundBuffer* pThis) { return pThis->lpVtbl->Restore(pThis); } /* IDirectSoundCapture */ @@ -9998,12 +10450,12 @@ struct ma_IDirectSoundCapture { ma_IDirectSoundCaptureVtbl* lpVtbl; }; -HRESULT ma_IDirectSoundCapture_QueryInterface(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IDirectSoundCapture_AddRef(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IDirectSoundCapture_Release(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); } -HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); } -HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } +static HRESULT ma_IDirectSoundCapture_QueryInterface(ma_IDirectSoundCapture* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IDirectSoundCapture_AddRef(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IDirectSoundCapture_Release(ma_IDirectSoundCapture* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IDirectSoundCapture_CreateCaptureBuffer(ma_IDirectSoundCapture* pThis, const MA_DSCBUFFERDESC* pDSCBufferDesc, ma_IDirectSoundCaptureBuffer** ppDSCBuffer, void* pUnkOuter) { return pThis->lpVtbl->CreateCaptureBuffer(pThis, pDSCBufferDesc, ppDSCBuffer, pUnkOuter); } +static HRESULT ma_IDirectSoundCapture_GetCaps (ma_IDirectSoundCapture* pThis, MA_DSCCAPS* pDSCCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCCaps); } +static HRESULT ma_IDirectSoundCapture_Initialize (ma_IDirectSoundCapture* pThis, const GUID* pGuidDevice) { return pThis->lpVtbl->Initialize(pThis, pGuidDevice); } /* IDirectSoundCaptureBuffer */ @@ -10029,18 +10481,18 @@ struct ma_IDirectSoundCaptureBuffer { ma_IDirectSoundCaptureBufferVtbl* lpVtbl; }; -HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); } -HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); } -HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } -HRESULT 
ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } -HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); } -HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } -HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); } -HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } -HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } +static HRESULT ma_IDirectSoundCaptureBuffer_QueryInterface(ma_IDirectSoundCaptureBuffer* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IDirectSoundCaptureBuffer_AddRef(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IDirectSoundCaptureBuffer_Release(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IDirectSoundCaptureBuffer_GetCaps(ma_IDirectSoundCaptureBuffer* pThis, MA_DSCBCAPS* pDSCBCaps) { return pThis->lpVtbl->GetCaps(pThis, pDSCBCaps); } +static HRESULT ma_IDirectSoundCaptureBuffer_GetCurrentPosition(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pCapturePosition, DWORD* pReadPosition) { return pThis->lpVtbl->GetCurrentPosition(pThis, pCapturePosition, pReadPosition); } +static HRESULT ma_IDirectSoundCaptureBuffer_GetFormat(ma_IDirectSoundCaptureBuffer* pThis, WAVEFORMATEX* pFormat, DWORD dwSizeAllocated, DWORD* pSizeWritten) { return pThis->lpVtbl->GetFormat(pThis, pFormat, dwSizeAllocated, pSizeWritten); } +static HRESULT ma_IDirectSoundCaptureBuffer_GetStatus(ma_IDirectSoundCaptureBuffer* pThis, DWORD* pStatus) { return pThis->lpVtbl->GetStatus(pThis, pStatus); } +static HRESULT ma_IDirectSoundCaptureBuffer_Initialize(ma_IDirectSoundCaptureBuffer* pThis, ma_IDirectSoundCapture* pDirectSoundCapture, const MA_DSCBUFFERDESC* pDSCBufferDesc) { return pThis->lpVtbl->Initialize(pThis, pDirectSoundCapture, pDSCBufferDesc); } +static HRESULT ma_IDirectSoundCaptureBuffer_Lock(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwOffset, DWORD dwBytes, void** ppAudioPtr1, DWORD* pAudioBytes1, void** ppAudioPtr2, DWORD* pAudioBytes2, DWORD dwFlags) { return pThis->lpVtbl->Lock(pThis, dwOffset, dwBytes, ppAudioPtr1, pAudioBytes1, ppAudioPtr2, pAudioBytes2, dwFlags); } +static HRESULT ma_IDirectSoundCaptureBuffer_Start(ma_IDirectSoundCaptureBuffer* pThis, DWORD dwFlags) { return pThis->lpVtbl->Start(pThis, dwFlags); } +static HRESULT ma_IDirectSoundCaptureBuffer_Stop(ma_IDirectSoundCaptureBuffer* pThis) { return pThis->lpVtbl->Stop(pThis); } +static HRESULT ma_IDirectSoundCaptureBuffer_Unlock(ma_IDirectSoundCaptureBuffer* pThis, void* pAudioPtr1, DWORD dwAudioBytes1, void* pAudioPtr2, DWORD dwAudioBytes2) { 
return pThis->lpVtbl->Unlock(pThis, pAudioPtr1, dwAudioBytes1, pAudioPtr2, dwAudioBytes2); } /* IDirectSoundNotify */ @@ -10058,10 +10510,10 @@ struct ma_IDirectSoundNotify { ma_IDirectSoundNotifyVtbl* lpVtbl; }; -HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } -ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); } -ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); } -HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); } +static HRESULT ma_IDirectSoundNotify_QueryInterface(ma_IDirectSoundNotify* pThis, const IID* const riid, void** ppObject) { return pThis->lpVtbl->QueryInterface(pThis, riid, ppObject); } +static ULONG ma_IDirectSoundNotify_AddRef(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->AddRef(pThis); } +static ULONG ma_IDirectSoundNotify_Release(ma_IDirectSoundNotify* pThis) { return pThis->lpVtbl->Release(pThis); } +static HRESULT ma_IDirectSoundNotify_SetNotificationPositions(ma_IDirectSoundNotify* pThis, DWORD dwPositionNotifies, const MA_DSBPOSITIONNOTIFY* pPositionNotifies) { return pThis->lpVtbl->SetNotificationPositions(pThis, dwPositionNotifies, pPositionNotifies); } typedef BOOL (CALLBACK * ma_DSEnumCallbackAProc) (LPGUID pDeviceGUID, LPCSTR pDeviceDescription, LPCSTR pModule, LPVOID pContext); @@ -10070,12 +10522,41 @@ typedef HRESULT (WINAPI * ma_DirectSoundEnumerateAProc) (ma_DSEnumCallba typedef HRESULT (WINAPI * ma_DirectSoundCaptureCreateProc) (const GUID* pcGuidDevice, ma_IDirectSoundCapture** ppDSC8, LPUNKNOWN pUnkOuter); typedef HRESULT (WINAPI * ma_DirectSoundCaptureEnumerateAProc)(ma_DSEnumCallbackAProc pDSEnumCallback, LPVOID pContext); +static ma_uint32 ma_get_best_sample_rate_within_range(ma_uint32 sampleRateMin, ma_uint32 sampleRateMax) +{ + /* Normalize the range in case we were given something stupid. */ + if (sampleRateMin < MA_MIN_SAMPLE_RATE) { + sampleRateMin = MA_MIN_SAMPLE_RATE; + } + if (sampleRateMax > MA_MAX_SAMPLE_RATE) { + sampleRateMax = MA_MAX_SAMPLE_RATE; + } + if (sampleRateMin > sampleRateMax) { + sampleRateMin = sampleRateMax; + } + + if (sampleRateMin == sampleRateMax) { + return sampleRateMax; + } else { + size_t iStandardRate; + for (iStandardRate = 0; iStandardRate < ma_countof(g_maStandardSampleRatePriorities); ++iStandardRate) { + ma_uint32 standardRate = g_maStandardSampleRatePriorities[iStandardRate]; + if (standardRate >= sampleRateMin && standardRate <= sampleRateMax) { + return standardRate; + } + } + } + + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; +} /* Retrieves the channel count and channel map for the given speaker configuration. If the speaker configuration is unknown, the channel count and channel map will be left unmodified. 
*/ -void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut) +static void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pChannelsOut, DWORD* pChannelMapOut) { WORD channels; DWORD channelMap; @@ -10117,13 +10598,13 @@ void ma_get_channels_from_speaker_config__dsound(DWORD speakerConfig, WORD* pCha } -ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound) +static ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSound** ppDirectSound) { ma_IDirectSound* pDirectSound; HWND hWnd; - ma_assert(pContext != NULL); - ma_assert(ppDirectSound != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDirectSound != NULL); *ppDirectSound = NULL; pDirectSound = NULL; @@ -10145,12 +10626,12 @@ ma_result ma_context_create_IDirectSound__dsound(ma_context* pContext, ma_share_ return MA_SUCCESS; } -ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture) +static ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma_share_mode shareMode, const ma_device_id* pDeviceID, ma_IDirectSoundCapture** ppDirectSoundCapture) { ma_IDirectSoundCapture* pDirectSoundCapture; - ma_assert(pContext != NULL); - ma_assert(ppDirectSoundCapture != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppDirectSoundCapture != NULL); /* DirectSound does not support exclusive mode for capture. */ if (shareMode == ma_share_mode_exclusive) { @@ -10168,14 +10649,14 @@ ma_result ma_context_create_IDirectSoundCapture__dsound(ma_context* pContext, ma return MA_SUCCESS; } -ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate) +static ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* pContext, ma_IDirectSoundCapture* pDirectSoundCapture, WORD* pChannels, WORD* pBitsPerSample, DWORD* pSampleRate) { MA_DSCCAPS caps; WORD bitsPerSample; DWORD sampleRate; - ma_assert(pContext != NULL); - ma_assert(pDirectSoundCapture != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDirectSoundCapture != NULL); if (pChannels) { *pChannels = 0; @@ -10187,7 +10668,7 @@ ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* *pSampleRate = 0; } - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); caps.dwSize = sizeof(caps); if (FAILED(ma_IDirectSoundCapture_GetCaps(pDirectSoundCapture, &caps))) { return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundCapture_GetCaps() failed for capture device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); @@ -10267,11 +10748,11 @@ ma_result ma_context_get_format_info_for_IDirectSoundCapture__dsound(ma_context* return MA_SUCCESS; } -ma_bool32 ma_context_is_device_id_equal__dsound(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__dsound(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return memcmp(pID0->dsound, 
pID1->dsound, sizeof(pID0->dsound)) == 0; @@ -10287,18 +10768,18 @@ typedef struct ma_bool32 terminated; } ma_context_enumerate_devices_callback_data__dsound; -BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) +static BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) { ma_context_enumerate_devices_callback_data__dsound* pData = (ma_context_enumerate_devices_callback_data__dsound*)lpContext; ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); /* ID. */ if (lpGuid != NULL) { - ma_copy_memory(deviceInfo.id.dsound, lpGuid, 16); + MA_COPY_MEMORY(deviceInfo.id.dsound, lpGuid, 16); } else { - ma_zero_memory(deviceInfo.id.dsound, 16); + MA_ZERO_MEMORY(deviceInfo.id.dsound, 16); } /* Name / Description */ @@ -10306,7 +10787,7 @@ BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCST /* Call the callback function, but make sure we stop enumerating if the callee requested so. */ - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); pData->terminated = !pData->callback(pData->pContext, pData->deviceType, &deviceInfo, pData->pUserData); if (pData->terminated) { return FALSE; /* Stop enumeration. */ @@ -10317,12 +10798,12 @@ BOOL CALLBACK ma_context_enumerate_devices_callback__dsound(LPGUID lpGuid, LPCST (void)lpcstrModule; } -ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__dsound(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_context_enumerate_devices_callback_data__dsound data; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); data.pContext = pContext; data.callback = callback; @@ -10352,10 +10833,10 @@ typedef struct ma_bool32 found; } ma_context_get_device_info_callback_data__dsound; -BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) +static BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) { ma_context_get_device_info_callback_data__dsound* pData = (ma_context_get_device_info_callback_data__dsound*)lpContext; - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); if ((pData->pDeviceID == NULL || ma_is_guid_equal(pData->pDeviceID->dsound, &MA_GUID_NULL)) && (lpGuid == NULL || ma_is_guid_equal(lpGuid, &MA_GUID_NULL))) { /* Default device. */ @@ -10364,7 +10845,7 @@ BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR return FALSE; /* Stop enumeration. */ } else { /* Not the default device. 
*/ - if (lpGuid != NULL) { + if (lpGuid != NULL && pData->pDeviceID != NULL) { if (memcmp(pData->pDeviceID->dsound, lpGuid, sizeof(pData->pDeviceID->dsound)) == 0) { ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), lpcstrDescription, (size_t)-1); pData->found = MA_TRUE; @@ -10377,7 +10858,7 @@ BOOL CALLBACK ma_context_get_device_info_callback__dsound(LPGUID lpGuid, LPCSTR return TRUE; } -ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { /* Exclusive mode and capture not supported with DirectSound. */ if (deviceType == ma_device_type_capture && shareMode == ma_share_mode_exclusive) { @@ -10388,7 +10869,7 @@ ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_typ ma_context_get_device_info_callback_data__dsound data; /* ID. */ - ma_copy_memory(pDeviceInfo->id.dsound, pDeviceID->dsound, 16); + MA_COPY_MEMORY(pDeviceInfo->id.dsound, pDeviceID->dsound, 16); /* Name / Description. This is retrieved by enumerating over each device until we find that one that matches the input ID. */ data.pDeviceID = pDeviceID; @@ -10407,7 +10888,7 @@ ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_typ /* I don't think there's a way to get the name of the default device with DirectSound. In this case we just need to use defaults. */ /* ID */ - ma_zero_memory(pDeviceInfo->id.dsound, 16); + MA_ZERO_MEMORY(pDeviceInfo->id.dsound, 16); /* Name / Description */ if (deviceType == ma_device_type_playback) { @@ -10430,7 +10911,7 @@ ma_result ma_context_get_device_info__dsound(ma_context* pContext, ma_device_typ return result; } - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); caps.dwSize = sizeof(caps); if (FAILED(ma_IDirectSound_GetCaps(pDirectSound, &caps))) { return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSound_GetCaps() failed for playback device.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); @@ -10537,20 +11018,20 @@ typedef struct ma_device_info* pInfo; } ma_device_enum_data__dsound; -BOOL CALLBACK ma_enum_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) +static BOOL CALLBACK ma_enum_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescription, LPCSTR lpcstrModule, LPVOID lpContext) { ma_device_enum_data__dsound* pData = (ma_device_enum_data__dsound*)lpContext; - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); if (pData->pInfo != NULL) { if (pData->infoCount > 0) { - ma_zero_object(pData->pInfo); + MA_ZERO_OBJECT(pData->pInfo); ma_strncpy_s(pData->pInfo->name, sizeof(pData->pInfo->name), lpcstrDescription, (size_t)-1); if (lpGuid != NULL) { - ma_copy_memory(pData->pInfo->id.dsound, lpGuid, 16); + MA_COPY_MEMORY(pData->pInfo->id.dsound, lpGuid, 16); } else { - ma_zero_memory(pData->pInfo->id.dsound, 16); + MA_ZERO_MEMORY(pData->pInfo->id.dsound, 16); } pData->pInfo += 1; @@ -10565,9 +11046,9 @@ BOOL CALLBACK ma_enum_devices_callback__dsound(LPGUID lpGuid, LPCSTR lpcstrDescr return TRUE; } -void ma_device_uninit__dsound(ma_device* pDevice) +static void ma_device_uninit__dsound(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->dsound.pCaptureBuffer != NULL) { 
ma_IDirectSoundCaptureBuffer_Release((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer); @@ -10587,7 +11068,7 @@ void ma_device_uninit__dsound(ma_device* pDevice) } } -ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, WAVEFORMATEXTENSIBLE* pWF) +static ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* pChannelMap, WAVEFORMATEXTENSIBLE* pWF) { GUID subformat; @@ -10611,7 +11092,7 @@ ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels return MA_FORMAT_NOT_SUPPORTED; } - ma_zero_object(pWF); + MA_ZERO_OBJECT(pWF); pWF->Format.cbSize = sizeof(*pWF); pWF->Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE; pWF->Format.nChannels = (WORD)channels; @@ -10626,13 +11107,13 @@ ma_result ma_config_to_WAVEFORMATEXTENSIBLE(ma_format format, ma_uint32 channels return MA_SUCCESS; } -ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result; ma_uint32 bufferSizeInMilliseconds; - ma_assert(pDevice != NULL); - ma_zero_object(&pDevice->dsound); + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->dsound); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -10694,7 +11175,7 @@ ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* p /* The size of the buffer must be a clean multiple of the period count. */ bufferSizeInFrames = (ma_calculate_buffer_size_in_frames_from_milliseconds(bufferSizeInMilliseconds, wf.Format.nSamplesPerSec) / pConfig->periods) * pConfig->periods; - ma_zero_object(&descDS); + MA_ZERO_OBJECT(&descDS); descDS.dwSize = sizeof(descDS); descDS.dwFlags = 0; descDS.dwBufferBytes = bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, wf.Format.nChannels); @@ -10761,7 +11242,7 @@ ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* p return result; } - ma_zero_object(&descDSPrimary); + MA_ZERO_OBJECT(&descDSPrimary); descDSPrimary.dwSize = sizeof(MA_DSBUFFERDESC); descDSPrimary.dwFlags = MA_DSBCAPS_PRIMARYBUFFER | MA_DSBCAPS_CTRLVOLUME; if (FAILED(ma_IDirectSound_CreateSoundBuffer((ma_IDirectSound*)pDevice->dsound.pPlayback, &descDSPrimary, (ma_IDirectSoundBuffer**)&pDevice->dsound.pPlaybackPrimaryBuffer, NULL))) { @@ -10771,7 +11252,7 @@ ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* p /* We may want to make some adjustments to the format if we are using defaults. */ - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); caps.dwSize = sizeof(caps); if (FAILED(ma_IDirectSound_GetCaps((ma_IDirectSound*)pDevice->dsound.pPlayback, &caps))) { ma_device_uninit__dsound(pDevice); @@ -10855,7 +11336,7 @@ ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* p sound cards; it was directly behind the write cursor. Now, if the DSBCAPS_GETCURRENTPOSITION2 flag is specified, the application can get a more accurate play cursor. 
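   As a rough sketch of how that play cursor is then consumed (names follow the main loop further down), the playback buffer is treated as a ring and the loop works out how many bytes can safely be written behind the physical play cursor:

       bufferSizeInBytes = internalBufferSizeInFrames * bpfDevicePlayback
       same lap (loop flags equal):  availableBytes = (bufferSizeInBytes - virtualWriteCursorInBytes) + physicalPlayCursorInBytes
       play cursor one lap ahead:    availableBytes = physicalPlayCursorInBytes - virtualWriteCursorInBytes

   A stale or inaccurate play cursor would overstate availableBytes and let the loop lock and overwrite frames that have not been played yet, which is why the more accurate DSBCAPS_GETCURRENTPOSITION2 cursor is requested here.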
*/ - ma_zero_object(&descDS); + MA_ZERO_OBJECT(&descDS); descDS.dwSize = sizeof(descDS); descDS.dwFlags = MA_DSBCAPS_CTRLPOSITIONNOTIFY | MA_DSBCAPS_GLOBALFOCUS | MA_DSBCAPS_GETCURRENTPOSITION2; descDS.dwBufferBytes = bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); @@ -10875,20 +11356,21 @@ ma_result ma_device_init__dsound(ma_context* pContext, const ma_device_config* p } -ma_result ma_device_main_loop__dsound(ma_device* pDevice) +static ma_result ma_device_main_loop__dsound(ma_device* pDevice) { ma_result result = MA_SUCCESS; - ma_uint32 bpfCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 bpfPlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 bpfDeviceCapture = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 bpfDevicePlayback = ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); HRESULT hr; DWORD lockOffsetInBytesCapture; DWORD lockSizeInBytesCapture; DWORD mappedSizeInBytesCapture; - void* pMappedBufferCapture; + DWORD mappedDeviceFramesProcessedCapture; + void* pMappedDeviceBufferCapture; DWORD lockOffsetInBytesPlayback; DWORD lockSizeInBytesPlayback; DWORD mappedSizeInBytesPlayback; - void* pMappedBufferPlayback; + void* pMappedDeviceBufferPlayback; DWORD prevReadCursorInBytesCapture = 0; DWORD prevPlayCursorInBytesPlayback = 0; ma_bool32 physicalPlayCursorLoopFlagPlayback = 0; @@ -10898,7 +11380,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) ma_uint32 framesWrittenToPlaybackDevice = 0; /* For knowing whether or not the playback device needs to be started. */ ma_uint32 waitTimeInMilliseconds = 1; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* The first thing to do is start the capture device. The playback device is only started after the first period is written. */ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { @@ -10937,10 +11419,10 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, do it again from the start. */ - if (prevReadCursorInBytesCapture < pDevice->capture.internalBufferSizeInFrames*bpfCapture) { + if (prevReadCursorInBytesCapture < pDevice->capture.internalBufferSizeInFrames*bpfDeviceCapture) { /* Lock up to the end of the buffer. */ lockOffsetInBytesCapture = prevReadCursorInBytesCapture; - lockSizeInBytesCapture = (pDevice->capture.internalBufferSizeInFrames*bpfCapture) - prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalBufferSizeInFrames*bpfDeviceCapture) - prevReadCursorInBytesCapture; } else { /* Lock starting from the start of the buffer. */ lockOffsetInBytesCapture = 0; @@ -10953,32 +11435,37 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) continue; /* Nothing is available in the capture buffer. 
*/ } - hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); if (FAILED(hr)) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); } /* At this point we have some input data that we need to output. We do not return until every mapped frame of the input data is written to the playback device. */ - pDevice->capture._dspFrameCount = mappedSizeInBytesCapture / bpfCapture; - pDevice->capture._dspFrames = (const ma_uint8*)pMappedBufferCapture; + mappedDeviceFramesProcessedCapture = 0; + for (;;) { /* Keep writing to the playback device. */ - ma_uint8 inputFramesInExternalFormat[4096]; - ma_uint32 inputFramesInExternalFormatCap = sizeof(inputFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 inputFramesInExternalFormatCount; - ma_uint8 outputFramesInExternalFormat[4096]; - ma_uint32 outputFramesInExternalFormatCap = sizeof(outputFramesInExternalFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - inputFramesInExternalFormatCount = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, inputFramesInExternalFormat, ma_min(inputFramesInExternalFormatCap, outputFramesInExternalFormatCap)); - if (inputFramesInExternalFormatCount == 0) { - break; /* No more input data. */ + ma_uint8 inputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 inputFramesInClientFormatCap = sizeof(inputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint8 outputFramesInClientFormat[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 outputFramesInClientFormatCap = sizeof(outputFramesInClientFormat) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint32 outputFramesInClientFormatCount; + ma_uint32 outputFramesInClientFormatConsumed = 0; + ma_uint64 clientCapturedFramesToProcess = ma_min(inputFramesInClientFormatCap, outputFramesInClientFormatCap); + ma_uint64 deviceCapturedFramesToProcess = (mappedSizeInBytesCapture / bpfDeviceCapture) - mappedDeviceFramesProcessedCapture; + void* pRunningMappedDeviceBufferCapture = ma_offset_ptr(pMappedDeviceBufferCapture, mappedDeviceFramesProcessedCapture * bpfDeviceCapture); + + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningMappedDeviceBufferCapture, &deviceCapturedFramesToProcess, inputFramesInClientFormat, &clientCapturedFramesToProcess); + if (result != MA_SUCCESS) { + break; } - ma_device__on_data(pDevice, outputFramesInExternalFormat, inputFramesInExternalFormat, inputFramesInExternalFormatCount); + outputFramesInClientFormatCount = (ma_uint32)clientCapturedFramesToProcess; + mappedDeviceFramesProcessedCapture += (ma_uint32)deviceCapturedFramesToProcess; - /* At this point we have input and output data in external format. All we need to do now is convert it to the output format. This may take a few passes. 
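   The rewritten loop below leans on the in/out frame-count convention of ma_data_converter_process_pcm_frames(). Roughly, the call pattern it assumes looks like this (a sketch, not a verbatim quote of the API):

       ma_uint64 frameCountIn  = <number of input frames available>;
       ma_uint64 frameCountOut = <capacity of the output buffer in frames>;
       result = ma_data_converter_process_pcm_frames(&converter, pFramesIn, &frameCountIn, pFramesOut, &frameCountOut);
       on return, frameCountIn holds the frames actually consumed and frameCountOut the frames actually produced

   which is why the code advances its running capture pointer by the consumed count and keeps looping until a pass consumes or produces nothing.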
*/ - pDevice->playback._dspFrameCount = inputFramesInExternalFormatCount; - pDevice->playback._dspFrames = (const ma_uint8*)outputFramesInExternalFormat; + ma_device__on_data(pDevice, outputFramesInClientFormat, inputFramesInClientFormat, (ma_uint32)clientCapturedFramesToProcess); + + /* At this point we have input and output data in client format. All we need to do now is convert it to the output device format. This may take a few passes. */ for (;;) { ma_uint32 framesWrittenThisIteration; DWORD physicalPlayCursorInBytes; @@ -11000,7 +11487,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { - availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ } else { /* This is an error. */ @@ -11046,13 +11533,13 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback; if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { /* Same loop iteration. Go up to the end of the buffer. */ - lockSizeInBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback; + lockSizeInBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; } else { /* Different loop iterations. Go up to the physical play cursor. */ lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; } - hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); + hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); if (FAILED(hr)) { result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); break; @@ -11063,9 +11550,9 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) endless glitching due to it constantly running out of data. 
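   As a worked example with made-up numbers: with a 4-period playback buffer of 4096 frames (1024 frames per period), if only 600 frames are still queued for playback the device is close to starving, so the code below pads with (2*1024 - 600) = 1448 frames worth of silence, clamped to the size of the locked region, before writing fresh client data.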
*/ if (isPlaybackDeviceStarted) { - DWORD bytesQueuedForPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - availableBytesPlayback; - if (bytesQueuedForPlayback < ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*bpfPlayback)) { - silentPaddingInBytes = ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*2*bpfPlayback) - bytesQueuedForPlayback; + DWORD bytesQueuedForPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) - availableBytesPlayback; + if (bytesQueuedForPlayback < ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*bpfDevicePlayback)) { + silentPaddingInBytes = ((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)*2*bpfDevicePlayback) - bytesQueuedForPlayback; if (silentPaddingInBytes > lockSizeInBytesPlayback) { silentPaddingInBytes = lockSizeInBytesPlayback; } @@ -11078,21 +11565,32 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) /* At this point we have a buffer for output. */ if (silentPaddingInBytes > 0) { - ma_zero_memory(pMappedBufferPlayback, silentPaddingInBytes); - framesWrittenThisIteration = silentPaddingInBytes/bpfPlayback; + MA_ZERO_MEMORY(pMappedDeviceBufferPlayback, silentPaddingInBytes); + framesWrittenThisIteration = silentPaddingInBytes/bpfDevicePlayback; } else { - framesWrittenThisIteration = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, pMappedBufferPlayback, mappedSizeInBytesPlayback/bpfPlayback); + ma_uint64 convertedFrameCountIn = (outputFramesInClientFormatCount - outputFramesInClientFormatConsumed); + ma_uint64 convertedFrameCountOut = mappedSizeInBytesPlayback/bpfDevicePlayback; + void* pConvertedFramesIn = ma_offset_ptr(outputFramesInClientFormat, outputFramesInClientFormatConsumed * bpfDevicePlayback); + void* pConvertedFramesOut = pMappedDeviceBufferPlayback; + + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, pConvertedFramesIn, &convertedFrameCountIn, pConvertedFramesOut, &convertedFrameCountOut); + if (result != MA_SUCCESS) { + break; + } + + outputFramesInClientFormatConsumed += (ma_uint32)convertedFrameCountOut; + framesWrittenThisIteration = (ma_uint32)convertedFrameCountOut; } - hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedBufferPlayback, framesWrittenThisIteration*bpfPlayback, NULL, 0); + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, framesWrittenThisIteration*bpfDevicePlayback, NULL, 0); if (FAILED(hr)) { result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); break; } - virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfPlayback; - if ((virtualWriteCursorInBytesPlayback/bpfPlayback) == pDevice->playback.internalBufferSizeInFrames) { + virtualWriteCursorInBytesPlayback += framesWrittenThisIteration*bpfDevicePlayback; + if ((virtualWriteCursorInBytesPlayback/bpfDevicePlayback) == pDevice->playback.internalBufferSizeInFrames) { virtualWriteCursorInBytesPlayback = 0; virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; } @@ -11110,19 +11608,19 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) isPlaybackDeviceStarted = MA_TRUE; } - if (framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfPlayback) { + if 
(framesWrittenThisIteration < mappedSizeInBytesPlayback/bpfDevicePlayback) { break; /* We're finished with the output data.*/ } } - if (inputFramesInExternalFormatCount < inputFramesInExternalFormatCap) { + if (clientCapturedFramesToProcess == 0) { break; /* We just consumed every input sample. */ } } /* At this point we're done with the mapped portion of the capture buffer. */ - hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedBufferCapture, mappedSizeInBytesCapture, NULL, 0); + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); if (FAILED(hr)) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); } @@ -11155,10 +11653,10 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) The capture position has looped. This is the more complex case. Map to the end of the buffer. If this does not return anything, do it again from the start. */ - if (prevReadCursorInBytesCapture < pDevice->capture.internalBufferSizeInFrames*bpfCapture) { + if (prevReadCursorInBytesCapture < pDevice->capture.internalBufferSizeInFrames*bpfDeviceCapture) { /* Lock up to the end of the buffer. */ lockOffsetInBytesCapture = prevReadCursorInBytesCapture; - lockSizeInBytesCapture = (pDevice->capture.internalBufferSizeInFrames*bpfCapture) - prevReadCursorInBytesCapture; + lockSizeInBytesCapture = (pDevice->capture.internalBufferSizeInFrames*bpfDeviceCapture) - prevReadCursorInBytesCapture; } else { /* Lock starting from the start of the buffer. */ lockOffsetInBytesCapture = 0; @@ -11176,7 +11674,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) continue; /* Nothing is available in the capture buffer. 
*/ } - hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); + hr = ma_IDirectSoundCaptureBuffer_Lock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, lockOffsetInBytesCapture, lockSizeInBytesCapture, &pMappedDeviceBufferCapture, &mappedSizeInBytesCapture, NULL, NULL, 0); if (FAILED(hr)) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from capture device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); } @@ -11187,15 +11685,15 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) } #endif - ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfCapture, pMappedBufferCapture); + ma_device__send_frames_to_client(pDevice, mappedSizeInBytesCapture/bpfDeviceCapture, pMappedDeviceBufferCapture); - hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedBufferCapture, mappedSizeInBytesCapture, NULL, 0); + hr = ma_IDirectSoundCaptureBuffer_Unlock((ma_IDirectSoundCaptureBuffer*)pDevice->dsound.pCaptureBuffer, pMappedDeviceBufferCapture, mappedSizeInBytesCapture, NULL, 0); if (FAILED(hr)) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from capture device after reading from the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); } prevReadCursorInBytesCapture = lockOffsetInBytesCapture + mappedSizeInBytesCapture; - if (prevReadCursorInBytesCapture == (pDevice->capture.internalBufferSizeInFrames*bpfCapture)) { + if (prevReadCursorInBytesCapture == (pDevice->capture.internalBufferSizeInFrames*bpfDeviceCapture)) { prevReadCursorInBytesCapture = 0; } } break; @@ -11220,7 +11718,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { - availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. */ } else { /* This is an error. */ @@ -11264,29 +11762,29 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) lockOffsetInBytesPlayback = virtualWriteCursorInBytesPlayback; if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { /* Same loop iteration. Go up to the end of the buffer. */ - lockSizeInBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback; + lockSizeInBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; } else { /* Different loop iterations. Go up to the physical play cursor. 
*/ lockSizeInBytesPlayback = physicalPlayCursorInBytes - virtualWriteCursorInBytesPlayback; } - hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); + hr = ma_IDirectSoundBuffer_Lock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, lockOffsetInBytesPlayback, lockSizeInBytesPlayback, &pMappedDeviceBufferPlayback, &mappedSizeInBytesPlayback, NULL, NULL, 0); if (FAILED(hr)) { result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to map buffer from playback device in preparation for writing to the device.", MA_FAILED_TO_MAP_DEVICE_BUFFER); break; } /* At this point we have a buffer for output. */ - ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfPlayback), pMappedBufferPlayback); + ma_device__read_frames_from_client(pDevice, (mappedSizeInBytesPlayback/bpfDevicePlayback), pMappedDeviceBufferPlayback); - hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedBufferPlayback, mappedSizeInBytesPlayback, NULL, 0); + hr = ma_IDirectSoundBuffer_Unlock((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, pMappedDeviceBufferPlayback, mappedSizeInBytesPlayback, NULL, 0); if (FAILED(hr)) { result = ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] Failed to unlock internal buffer from playback device after writing to the device.", MA_FAILED_TO_UNMAP_DEVICE_BUFFER); break; } virtualWriteCursorInBytesPlayback += mappedSizeInBytesPlayback; - if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalBufferSizeInFrames*bpfPlayback) { + if (virtualWriteCursorInBytesPlayback == pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) { virtualWriteCursorInBytesPlayback = 0; virtualWriteCursorLoopFlagPlayback = !virtualWriteCursorLoopFlagPlayback; } @@ -11295,7 +11793,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) We may need to start the device. We want two full periods to be written before starting the playback device. Having an extra period adds a bit of a buffer to prevent the playback buffer from getting starved. */ - framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfPlayback; + framesWrittenToPlaybackDevice += mappedSizeInBytesPlayback/bpfDevicePlayback; if (!isPlaybackDeviceStarted && framesWrittenToPlaybackDevice >= (pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods)) { if (FAILED(ma_IDirectSoundBuffer_Play((ma_IDirectSoundBuffer*)pDevice->dsound.pPlaybackBuffer, 0, 0, MA_DSBPLAY_LOOPING))) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[DirectSound] IDirectSoundBuffer_Play() failed.", MA_FAILED_TO_START_BACKEND_DEVICE); @@ -11339,7 +11837,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) if (physicalPlayCursorLoopFlagPlayback == virtualWriteCursorLoopFlagPlayback) { /* Same loop iteration. The available bytes wraps all the way around from the virtual write cursor to the physical play cursor. */ if (physicalPlayCursorInBytes <= virtualWriteCursorInBytesPlayback) { - availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfPlayback) - virtualWriteCursorInBytesPlayback; + availableBytesPlayback = (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback) - virtualWriteCursorInBytesPlayback; availableBytesPlayback += physicalPlayCursorInBytes; /* Wrap around. 
*/ } else { break; @@ -11353,7 +11851,7 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) } } - if (availableBytesPlayback >= (pDevice->playback.internalBufferSizeInFrames*bpfPlayback)) { + if (availableBytesPlayback >= (pDevice->playback.internalBufferSizeInFrames*bpfDevicePlayback)) { break; } @@ -11371,19 +11869,19 @@ ma_result ma_device_main_loop__dsound(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_context_uninit__dsound(ma_context* pContext) +static ma_result ma_context_uninit__dsound(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_dsound); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_dsound); ma_dlclose(pContext, pContext->dsound.hDSoundDLL); return MA_SUCCESS; } -ma_result ma_context_init__dsound(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__dsound(const ma_context_config* pConfig, ma_context* pContext) { - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)pConfig; @@ -11471,7 +11969,7 @@ typedef MMRESULT (WINAPI * MA_PFN_waveInAddBuffer)(HWAVEIN hwi, LPWAVEHDR pwh, U typedef MMRESULT (WINAPI * MA_PFN_waveInStart)(HWAVEIN hwi); typedef MMRESULT (WINAPI * MA_PFN_waveInReset)(HWAVEIN hwi); -ma_result ma_result_from_MMRESULT(MMRESULT resultMM) +static ma_result ma_result_from_MMRESULT(MMRESULT resultMM) { switch (resultMM) { case MMSYSERR_NOERROR: return MA_SUCCESS; @@ -11486,7 +11984,7 @@ ma_result ma_result_from_MMRESULT(MMRESULT resultMM) } } -char* ma_find_last_character(char* str, char ch) +static char* ma_find_last_character(char* str, char ch) { char* last; @@ -11506,6 +12004,12 @@ char* ma_find_last_character(char* str, char ch) return last; } +static ma_uint32 ma_get_fragment_size_in_bytes(ma_uint32 bufferSizeInFrames, ma_uint32 periods, ma_format format, ma_uint32 channels) +{ + ma_uint32 fragmentSizeInFrames = bufferSizeInFrames / periods; + return fragmentSizeInFrames * ma_get_bytes_per_frame(format, channels); +} + /* Our own "WAVECAPS" structure that contains generic information shared between WAVEOUTCAPS2 and WAVEINCAPS2 so @@ -11519,7 +12023,7 @@ typedef struct GUID NameGuid; } MA_WAVECAPSA; -ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate) +static ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD channels, WORD* pBitsPerSample, DWORD* pSampleRate) { WORD bitsPerSample = 0; DWORD sampleRate = 0; @@ -11599,11 +12103,11 @@ ma_result ma_get_best_info_from_formats_flags__winmm(DWORD dwFormats, WORD chann return MA_SUCCESS; } -ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, WAVEFORMATEX* pWF) +static ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels, WAVEFORMATEX* pWF) { - ma_assert(pWF != NULL); + MA_ASSERT(pWF != NULL); - ma_zero_object(pWF); + MA_ZERO_OBJECT(pWF); pWF->cbSize = sizeof(*pWF); pWF->wFormatTag = WAVE_FORMAT_PCM; pWF->nChannels = (WORD)channels; @@ -11675,15 +12179,15 @@ ma_result ma_formats_flags_to_WAVEFORMATEX__winmm(DWORD dwFormats, WORD channels return MA_SUCCESS; } -ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVECAPSA* pCaps, ma_device_info* pDeviceInfo) { WORD bitsPerSample; DWORD sampleRate; ma_result result; - ma_assert(pContext != NULL); - ma_assert(pCaps != 
NULL); - ma_assert(pDeviceInfo != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); /* Name / Description @@ -11772,30 +12276,30 @@ ma_result ma_context_get_device_info_from_WAVECAPS(ma_context* pContext, MA_WAVE return MA_SUCCESS; } -ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info_from_WAVEOUTCAPS2(ma_context* pContext, MA_WAVEOUTCAPS2A* pCaps, ma_device_info* pDeviceInfo) { MA_WAVECAPSA caps; - ma_assert(pContext != NULL); - ma_assert(pCaps != NULL); - ma_assert(pDeviceInfo != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); - ma_copy_memory(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); caps.dwFormats = pCaps->dwFormats; caps.wChannels = pCaps->wChannels; caps.NameGuid = pCaps->NameGuid; return ma_context_get_device_info_from_WAVECAPS(pContext, &caps, pDeviceInfo); } -ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_WAVEINCAPS2A* pCaps, ma_device_info* pDeviceInfo) { MA_WAVECAPSA caps; - ma_assert(pContext != NULL); - ma_assert(pCaps != NULL); - ma_assert(pDeviceInfo != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pCaps != NULL); + MA_ASSERT(pDeviceInfo != NULL); - ma_copy_memory(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); + MA_COPY_MEMORY(caps.szPname, pCaps->szPname, sizeof(caps.szPname)); caps.dwFormats = pCaps->dwFormats; caps.wChannels = pCaps->wChannels; caps.NameGuid = pCaps->NameGuid; @@ -11803,25 +12307,25 @@ ma_result ma_context_get_device_info_from_WAVEINCAPS2(ma_context* pContext, MA_W } -ma_bool32 ma_context_is_device_id_equal__winmm(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__winmm(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return pID0->winmm == pID1->winmm; } -ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { UINT playbackDeviceCount; UINT captureDeviceCount; UINT iPlaybackDevice; UINT iCaptureDevice; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); /* Playback. 
*/ playbackDeviceCount = ((MA_PFN_waveOutGetNumDevs)pContext->winmm.waveOutGetNumDevs)(); @@ -11829,13 +12333,13 @@ ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devi MMRESULT result; MA_WAVEOUTCAPS2A caps; - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(iPlaybackDevice, (WAVEOUTCAPSA*)&caps, sizeof(caps)); if (result == MMSYSERR_NOERROR) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); deviceInfo.id.winmm = iPlaybackDevice; if (ma_context_get_device_info_from_WAVEOUTCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { @@ -11853,13 +12357,13 @@ ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devi MMRESULT result; MA_WAVEINCAPS2A caps; - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(iCaptureDevice, (WAVEINCAPSA*)&caps, sizeof(caps)); if (result == MMSYSERR_NOERROR) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); deviceInfo.id.winmm = iCaptureDevice; if (ma_context_get_device_info_from_WAVEINCAPS2(pContext, &caps, &deviceInfo) == MA_SUCCESS) { @@ -11874,11 +12378,11 @@ ma_result ma_context_enumerate_devices__winmm(ma_context* pContext, ma_enum_devi return MA_SUCCESS; } -ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { UINT winMMDeviceID; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (shareMode == ma_share_mode_exclusive) { return MA_SHARE_MODE_NOT_SUPPORTED; @@ -11895,7 +12399,7 @@ ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type MMRESULT result; MA_WAVEOUTCAPS2A caps; - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); result = ((MA_PFN_waveOutGetDevCapsA)pContext->winmm.waveOutGetDevCapsA)(winMMDeviceID, (WAVEOUTCAPSA*)&caps, sizeof(caps)); if (result == MMSYSERR_NOERROR) { @@ -11905,7 +12409,7 @@ ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type MMRESULT result; MA_WAVEINCAPS2A caps; - ma_zero_object(&caps); + MA_ZERO_OBJECT(&caps); result = ((MA_PFN_waveInGetDevCapsA)pContext->winmm.waveInGetDevCapsA)(winMMDeviceID, (WAVEINCAPSA*)&caps, sizeof(caps)); if (result == MMSYSERR_NOERROR) { @@ -11917,9 +12421,9 @@ ma_result ma_context_get_device_info__winmm(ma_context* pContext, ma_device_type } -void ma_device_uninit__winmm(ma_device* pDevice) +static void ma_device_uninit__winmm(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((MA_PFN_waveInClose)pDevice->pContext->winmm.waveInClose)((HWAVEIN)pDevice->winmm.hDeviceCapture); @@ -11932,12 +12436,12 @@ void ma_device_uninit__winmm(ma_device* pDevice) CloseHandle((HANDLE)pDevice->winmm.hEventPlayback); } - ma_free(pDevice->winmm._pHeapData); + ma__free_from_callbacks(pDevice->winmm._pHeapData, &pDevice->pContext->allocationCallbacks); - ma_zero_object(&pDevice->winmm); /* Safety. */ + MA_ZERO_OBJECT(&pDevice->winmm); /* Safety. 
*/ } -ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { const char* errorMsg = ""; ma_result errorCode = MA_ERROR; @@ -11947,8 +12451,8 @@ ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pC UINT winMMDeviceIDCapture = 0; ma_uint32 bufferSizeInMilliseconds; - ma_assert(pDevice != NULL); - ma_zero_object(&pDevice->winmm); + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->winmm); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -12072,13 +12576,13 @@ ma_result ma_device_init__winmm(ma_context* pContext, const ma_device_config* pC heapSize += sizeof(WAVEHDR)*pDevice->playback.internalPeriods + (pDevice->playback.internalBufferSizeInFrames*ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); } - pDevice->winmm._pHeapData = (ma_uint8*)ma_malloc(heapSize); + pDevice->winmm._pHeapData = (ma_uint8*)ma__calloc_from_callbacks(heapSize, &pContext->allocationCallbacks); if (pDevice->winmm._pHeapData == NULL) { errorMsg = "[WinMM] Failed to allocate memory for the intermediary buffer.", errorCode = MA_OUT_OF_MEMORY; goto on_error; } - ma_zero_memory(pDevice->winmm._pHeapData, heapSize); + MA_ZERO_MEMORY(pDevice->winmm._pHeapData, heapSize); if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { ma_uint32 iPeriod; @@ -12162,15 +12666,15 @@ on_error: ((MA_PFN_waveOutClose)pContext->winmm.waveOutClose)((HWAVEOUT)pDevice->winmm.hDevicePlayback); } - ma_free(pDevice->winmm._pHeapData); + ma__free_from_callbacks(pDevice->winmm._pHeapData, &pContext->allocationCallbacks); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, errorMsg, errorCode); } -ma_result ma_device_stop__winmm(ma_device* pDevice) +static ma_result ma_device_stop__winmm(ma_device* pDevice) { MMRESULT resultMM; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { if (pDevice->winmm.hDeviceCapture == NULL) { @@ -12197,15 +12701,15 @@ ma_result ma_device_stop__winmm(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { ma_result result = MA_SUCCESS; MMRESULT resultMM; ma_uint32 totalFramesWritten; WAVEHDR* pWAVEHDR; - ma_assert(pDevice != NULL); - ma_assert(pPCMFrames != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); if (pFramesWritten != NULL) { *pFramesWritten = 0; @@ -12228,7 +12732,7 @@ ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_ ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesWritten)); const void* pSrc = ma_offset_ptr(pPCMFrames, totalFramesWritten*bpf); void* pDst = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderPlayback].lpData, pDevice->winmm.headerFramesConsumedPlayback*bpf); - ma_copy_memory(pDst, pSrc, framesToCopy*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); pDevice->winmm.headerFramesConsumedPlayback += framesToCopy; totalFramesWritten += framesToCopy; @@ -12255,7 +12759,7 @@ ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, 
ma_ } /* If at this point we have consumed the entire input buffer we can return. */ - ma_assert(totalFramesWritten <= frameCount); + MA_ASSERT(totalFramesWritten <= frameCount); if (totalFramesWritten == frameCount) { break; } @@ -12289,15 +12793,15 @@ ma_result ma_device_write__winmm(ma_device* pDevice, const void* pPCMFrames, ma_ return result; } -ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) { ma_result result = MA_SUCCESS; MMRESULT resultMM; ma_uint32 totalFramesRead; WAVEHDR* pWAVEHDR; - ma_assert(pDevice != NULL); - ma_assert(pPCMFrames != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); if (pFramesRead != NULL) { *pFramesRead = 0; @@ -12317,7 +12821,7 @@ ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 ma_uint32 framesToCopy = ma_min(framesRemainingInHeader, (frameCount - totalFramesRead)); const void* pSrc = ma_offset_ptr(pWAVEHDR[pDevice->winmm.iNextHeaderCapture].lpData, pDevice->winmm.headerFramesConsumedCapture*bpf); void* pDst = ma_offset_ptr(pPCMFrames, totalFramesRead*bpf); - ma_copy_memory(pDst, pSrc, framesToCopy*bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy*bpf); pDevice->winmm.headerFramesConsumedCapture += framesToCopy; totalFramesRead += framesToCopy; @@ -12344,7 +12848,7 @@ ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 } /* If at this point we have filled the entire input buffer we can return. */ - ma_assert(totalFramesRead <= frameCount); + MA_ASSERT(totalFramesRead <= frameCount); if (totalFramesRead == frameCount) { break; } @@ -12378,12 +12882,12 @@ ma_result ma_device_read__winmm(ma_device* pDevice, void* pPCMFrames, ma_uint32 return result; } -ma_result ma_device_main_loop__winmm(ma_device* pDevice) +static ma_result ma_device_main_loop__winmm(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = MA_FALSE; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* The capture device needs to be started immediately. 
*/ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { @@ -12421,84 +12925,95 @@ ma_result ma_device_main_loop__winmm(ma_device* pDevice) case ma_device_type_duplex: { /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__winmm(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = ma_device_read__winmm(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - pDevice->capture._dspFrameCount = framesToProcess; - pDevice->capture._dspFrames = capturedDeviceData; + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; for (;;) { - ma_uint8 capturedData[8192]; - ma_uint8 playbackData[8192]; - ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames); - ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing); - 
if (capturedFramesToProcess == 0) { - break; /* Don't fire the data callback with zero frames. */ + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; } - ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess); + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */ - /* At this point the playbackData buffer should be holding data that needs to be written to the device. */ - pDevice->playback._dspFrameCount = capturedFramesToProcess; - pDevice->playback._dspFrames = playbackData; + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__winmm(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__winmm(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast.
*/ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__winmm()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } break; case ma_device_type_capture: { /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); ma_uint32 periodSizeInFrames = pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods; ma_uint32 framesReadThisPeriod = 0; @@ -12525,7 +13040,7 @@ ma_result ma_device_main_loop__winmm(ma_device* pDevice) case ma_device_type_playback: { /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */ - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); ma_uint32 periodSizeInFrames = pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods; ma_uint32 framesWrittenThisPeriod = 0; @@ -12562,18 +13077,18 @@ ma_result ma_device_main_loop__winmm(ma_device* pDevice) return result; } -ma_result ma_context_uninit__winmm(ma_context* pContext) +static ma_result ma_context_uninit__winmm(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_winmm); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_winmm); ma_dlclose(pContext, pContext->winmm.hWinMM); return MA_SUCCESS; } -ma_result ma_context_init__winmm(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__winmm(const ma_context_config* pConfig, ma_context* pContext) { - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)pConfig; @@ -12877,7 +13392,7 @@ typedef const char* (* ma_snd_pcm_info_get_name_proc) ( typedef int (* ma_snd_config_update_free_global_proc) (); /* This array specifies each of the common devices that can be used for both playback and capture. */ -const char* g_maCommonDeviceNamesALSA[] = { +static const char* g_maCommonDeviceNamesALSA[] = { "default", "null", "pulse", @@ -12885,12 +13400,12 @@ const char* g_maCommonDeviceNamesALSA[] = { }; /* This array allows us to blacklist specific playback devices. */ -const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = { +static const char* g_maBlacklistedPlaybackDeviceNamesALSA[] = { "" }; /* This array allows us to blacklist specific capture devices. 
*/ -const char* g_maBlacklistedCaptureDeviceNamesALSA[] = { +static const char* g_maBlacklistedCaptureDeviceNamesALSA[] = { "" }; @@ -12908,7 +13423,7 @@ static struct {"bcm2835 ALSA", 2.0f} }; -float ma_find_default_buffer_size_scale__alsa(const char* deviceName) +static float ma_find_default_buffer_size_scale__alsa(const char* deviceName) { size_t i; @@ -12925,7 +13440,7 @@ float ma_find_default_buffer_size_scale__alsa(const char* deviceName) return 1; } -ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format) +static ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format) { ma_snd_pcm_format_t ALSAFormats[] = { MA_SND_PCM_FORMAT_UNKNOWN, /* ma_format_unknown */ @@ -12948,7 +13463,7 @@ ma_snd_pcm_format_t ma_convert_ma_format_to_alsa_format(ma_format format) return ALSAFormats[format]; } -ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA) +static ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA) { if (ma_is_little_endian()) { switch (formatALSA) { @@ -12975,7 +13490,7 @@ ma_format ma_format_from_alsa(ma_snd_pcm_format_t formatALSA) } } -ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos) +static ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChannelPos) { switch (alsaChannelPos) { @@ -13011,7 +13526,7 @@ ma_channel ma_convert_alsa_channel_position_to_ma_channel(unsigned int alsaChann return 0; } -ma_bool32 ma_is_common_device_name__alsa(const char* name) +static ma_bool32 ma_is_common_device_name__alsa(const char* name) { size_t iName; for (iName = 0; iName < ma_countof(g_maCommonDeviceNamesALSA); ++iName) { @@ -13024,7 +13539,7 @@ ma_bool32 ma_is_common_device_name__alsa(const char* name) } -ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name) +static ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name) { size_t iName; for (iName = 0; iName < ma_countof(g_maBlacklistedPlaybackDeviceNamesALSA); ++iName) { @@ -13036,7 +13551,7 @@ ma_bool32 ma_is_playback_device_blacklisted__alsa(const char* name) return MA_FALSE; } -ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name) +static ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name) { size_t iName; for (iName = 0; iName < ma_countof(g_maBlacklistedCaptureDeviceNamesALSA); ++iName) { @@ -13048,7 +13563,7 @@ ma_bool32 ma_is_capture_device_blacklisted__alsa(const char* name) return MA_FALSE; } -ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name) +static ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* name) { if (deviceType == ma_device_type_playback) { return ma_is_playback_device_blacklisted__alsa(name); @@ -13058,7 +13573,7 @@ ma_bool32 ma_is_device_blacklisted__alsa(ma_device_type deviceType, const char* } -const char* ma_find_char(const char* str, char c, int* index) +static const char* ma_find_char(const char* str, char c, int* index) { int i = 0; for (;;) { @@ -13080,7 +13595,7 @@ const char* ma_find_char(const char* str, char c, int* index) return NULL; } -ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid) +static ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid) { /* This function is just checking whether or not hwid is in "hw:%d,%d" format. 
*/ @@ -13124,7 +13639,7 @@ ma_bool32 ma_is_device_name_in_hw_format__alsa(const char* hwid) return MA_TRUE; } -int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */ +static int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, size_t dstSize, const char* src) /* Returns 0 on success, non-0 on error. */ { /* src should look something like this: "hw:CARD=I82801AAICH,DEV=0" */ @@ -13188,11 +13703,11 @@ int ma_convert_device_name_to_hw_format__alsa(ma_context* pContext, char* dst, s return 0; } -ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID) +static ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 count, const char* pHWID) { ma_uint32 i; - ma_assert(pHWID != NULL); + MA_ASSERT(pHWID != NULL); for (i = 0; i < count; ++i) { if (ma_strcmp(pUniqueIDs[i].alsa, pHWID) == 0) { @@ -13204,14 +13719,14 @@ ma_bool32 ma_does_id_exist_in_list__alsa(ma_device_id* pUniqueIDs, ma_uint32 cou } -ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_snd_pcm_t** ppPCM) +static ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMode, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_snd_pcm_t** ppPCM) { ma_snd_pcm_t* pPCM; ma_snd_pcm_stream_t stream; int openMode; - ma_assert(pContext != NULL); - ma_assert(ppPCM != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppPCM != NULL); *ppPCM = NULL; pPCM = NULL; @@ -13330,17 +13845,17 @@ ma_result ma_context_open_pcm__alsa(ma_context* pContext, ma_share_mode shareMod } -ma_bool32 ma_context_is_device_id_equal__alsa(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__alsa(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return ma_strcmp(pID0->alsa, pID1->alsa) == 0; } -ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_bool32 cbResult = MA_TRUE; char** ppDeviceHints; @@ -13348,8 +13863,8 @@ ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devic ma_uint32 uniqueIDCount = 0; char** ppNextDeviceHint; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); ma_mutex_lock(&pContext->alsa.internalDeviceEnumLock); @@ -13400,21 +13915,23 @@ ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devic goto next_device; /* The device has already been enumerated. Move on to the next one. */ } else { /* The device has not yet been enumerated. Make sure it's added to our list so that it's not enumerated again. 
*/ - ma_device_id* pNewUniqueIDs = (ma_device_id*)ma_realloc(pUniqueIDs, sizeof(*pUniqueIDs) * (uniqueIDCount + 1)); + size_t oldCapacity = sizeof(*pUniqueIDs) * uniqueIDCount; + size_t newCapacity = sizeof(*pUniqueIDs) * (uniqueIDCount + 1); + ma_device_id* pNewUniqueIDs = (ma_device_id*)ma__realloc_from_callbacks(pUniqueIDs, newCapacity, oldCapacity, &pContext->allocationCallbacks); if (pNewUniqueIDs == NULL) { goto next_device; /* Failed to allocate memory. */ } pUniqueIDs = pNewUniqueIDs; - ma_copy_memory(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid)); + MA_COPY_MEMORY(pUniqueIDs[uniqueIDCount].alsa, hwid, sizeof(hwid)); uniqueIDCount += 1; } } } else { - ma_zero_memory(hwid, sizeof(hwid)); + MA_ZERO_MEMORY(hwid, sizeof(hwid)); } - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.id.alsa, sizeof(deviceInfo.id.alsa), hwid, (size_t)-1); /* @@ -13488,7 +14005,7 @@ ma_result ma_context_enumerate_devices__alsa(ma_context* pContext, ma_enum_devic } } - ma_free(pUniqueIDs); + ma__free_from_callbacks(pUniqueIDs, &pContext->allocationCallbacks); ((ma_snd_device_name_free_hint_proc)pContext->alsa.snd_device_name_free_hint)((void**)ppDeviceHints); ma_mutex_unlock(&pContext->alsa.internalDeviceEnumLock); @@ -13506,10 +14023,10 @@ typedef struct ma_bool32 foundDevice; } ma_context_get_device_info_enum_callback_data__alsa; -ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) +static ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pDeviceInfo, void* pUserData) { ma_context_get_device_info_enum_callback_data__alsa* pData = (ma_context_get_device_info_enum_callback_data__alsa*)pUserData; - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); if (pData->pDeviceID == NULL && ma_strcmp(pDeviceInfo->id.alsa, "default") == 0) { ma_strncpy_s(pData->pDeviceInfo->name, sizeof(pData->pDeviceInfo->name), pDeviceInfo->name, (size_t)-1); @@ -13525,7 +14042,7 @@ ma_bool32 ma_context_get_device_info_enum_callback__alsa(ma_context* pContext, m return !pData->foundDevice; } -ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_context_get_device_info_enum_callback_data__alsa data; ma_result result; @@ -13534,7 +14051,7 @@ ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type ma_snd_pcm_format_mask_t* pFormatMask; int sampleRateDir = 0; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); /* We just enumerate to find basic information about the device. */ data.deviceType = deviceType; @@ -13558,7 +14075,7 @@ ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type } /* We need to initialize a HW parameters object in order to know what formats are supported. 
*/ - pHWParams = (ma_snd_pcm_hw_params_t*)calloc(1, ((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)()); + pHWParams = (ma_snd_pcm_hw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks); if (pHWParams == NULL) { return MA_OUT_OF_MEMORY; } @@ -13573,7 +14090,7 @@ ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type ((ma_snd_pcm_hw_params_get_rate_max_proc)pContext->alsa.snd_pcm_hw_params_get_rate_max)(pHWParams, &pDeviceInfo->maxSampleRate, &sampleRateDir); /* Formats. */ - pFormatMask = (ma_snd_pcm_format_mask_t*)calloc(1, ((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)()); + pFormatMask = (ma_snd_pcm_format_mask_t*)ma__calloc_from_callbacks(((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)(), &pContext->allocationCallbacks); if (pFormatMask == NULL) { return MA_OUT_OF_MEMORY; } @@ -13597,8 +14114,8 @@ ma_result ma_context_get_device_info__alsa(ma_context* pContext, ma_device_type pDeviceInfo->formats[pDeviceInfo->formatCount++] = ma_format_f32; } - ma_free(pFormatMask); - ma_free(pHWParams); + ma__free_from_callbacks(pFormatMask, &pContext->allocationCallbacks); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pContext->alsa.snd_pcm_close)(pPCM); return MA_SUCCESS; @@ -13612,9 +14129,9 @@ value is the number of frames available. This will return early if the main loop is broken with ma_device__break_main_loop(). */ -ma_uint32 ma_device__wait_for_frames__alsa(ma_device* pDevice, ma_bool32* pRequiresRestart) +static ma_uint32 ma_device__wait_for_frames__alsa(ma_device* pDevice, ma_bool32* pRequiresRestart) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pRequiresRestart) *pRequiresRestart = MA_FALSE; @@ -13672,9 +14189,9 @@ ma_uint32 ma_device__wait_for_frames__alsa(ma_device* pDevice, ma_bool32* pRequi return framesAvailable; } -ma_bool32 ma_device_read_from_client_and_write__alsa(ma_device* pDevice) +static ma_bool32 ma_device_read_from_client_and_write__alsa(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (!ma_device_is_started(pDevice) && ma_device__get_state(pDevice) != MA_STATE_STARTING) { return MA_FALSE; } @@ -13773,9 +14290,9 @@ ma_bool32 ma_device_read_from_client_and_write__alsa(ma_device* pDevice) return MA_TRUE; } -ma_bool32 ma_device_read_and_send_to_client__alsa(ma_device* pDevice) +static ma_bool32 ma_device_read_and_send_to_client__alsa(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (!ma_device_is_started(pDevice)) { return MA_FALSE; } @@ -13872,9 +14389,9 @@ ma_bool32 ma_device_read_and_send_to_client__alsa(ma_device* pDevice) } #endif /* 0 */ -void ma_device_uninit__alsa(ma_device* pDevice) +static void ma_device_uninit__alsa(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if ((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture) { ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture); @@ -13885,7 +14402,7 @@ void ma_device_uninit__alsa(ma_device* pDevice) } } -ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +static ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) { ma_result 
result; ma_snd_pcm_t* pPCM; @@ -13904,10 +14421,10 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con ma_snd_pcm_uframes_t bufferBoundary; float bufferSizeScaleFactor; - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */ - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should only be called for playback _or_ capture, never duplex. */ + MA_ASSERT(pDevice != NULL); formatALSA = ma_convert_ma_format_to_alsa_format((deviceType == ma_device_type_capture) ? pConfig->capture.format : pConfig->playback.format); shareMode = (deviceType == ma_device_type_capture) ? pConfig->capture.shareMode : pConfig->playback.shareMode; @@ -13921,7 +14438,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con /* If using the default buffer size we may want to apply some device-specific scaling for known devices that have peculiar latency characteristics */ bufferSizeScaleFactor = 1; if (pDevice->usingDefaultBufferSize) { - ma_snd_pcm_info_t* pInfo = (ma_snd_pcm_info_t*)calloc(1, ((ma_snd_pcm_info_sizeof_proc)pContext->alsa.snd_pcm_info_sizeof)()); + ma_snd_pcm_info_t* pInfo = (ma_snd_pcm_info_t*)ma__calloc_from_callbacks(((ma_snd_pcm_info_sizeof_proc)pContext->alsa.snd_pcm_info_sizeof)(), &pContext->allocationCallbacks); if (pInfo == NULL) { return MA_OUT_OF_MEMORY; } @@ -13936,7 +14453,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con /* It's the default device. We need to use DESC from snd_device_name_hint(). */ if (((ma_snd_device_name_hint_proc)pContext->alsa.snd_device_name_hint)(-1, "pcm", (void***)&ppDeviceHints) < 0) { - ma_free(pInfo); + ma__free_from_callbacks(pInfo, &pContext->allocationCallbacks); return MA_NO_BACKEND; } @@ -13972,18 +14489,18 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con } } - ma_free(pInfo); + ma__free_from_callbacks(pInfo, &pContext->allocationCallbacks); } /* Hardware parameters. */ - pHWParams = (ma_snd_pcm_hw_params_t*)calloc(1, ((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)()); + pHWParams = (ma_snd_pcm_hw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_hw_params_sizeof_proc)pContext->alsa.snd_pcm_hw_params_sizeof)(), &pContext->allocationCallbacks); if (pHWParams == NULL) { return MA_OUT_OF_MEMORY; } if (((ma_snd_pcm_hw_params_any_proc)pContext->alsa.snd_pcm_hw_params_any)(pPCM, pHWParams) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize hardware parameters. 
snd_pcm_hw_params_any() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE); } @@ -14002,7 +14519,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con if (!isUsingMMap) { if (((ma_snd_pcm_hw_params_set_access_proc)pContext->alsa.snd_pcm_hw_params_set_access)(pPCM, pHWParams, MA_SND_PCM_ACCESS_RW_INTERLEAVED) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set access mode to neither SND_PCM_ACCESS_MMAP_INTERLEAVED nor SND_PCM_ACCESS_RW_INTERLEAVED. snd_pcm_hw_params_set_access() failed.", MA_FORMAT_NOT_SUPPORTED); } @@ -14018,9 +14535,9 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con ma_snd_pcm_format_mask_t* pFormatMask; /* Try getting every supported format first. */ - pFormatMask = (ma_snd_pcm_format_mask_t*)calloc(1, ((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)()); + pFormatMask = (ma_snd_pcm_format_mask_t*)ma__calloc_from_callbacks(((ma_snd_pcm_format_mask_sizeof_proc)pContext->alsa.snd_pcm_format_mask_sizeof)(), &pContext->allocationCallbacks); if (pFormatMask == NULL) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return MA_OUT_OF_MEMORY; } @@ -14060,24 +14577,24 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con } if (formatALSA == MA_SND_PCM_FORMAT_UNKNOWN) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. The device does not support any miniaudio formats.", MA_FORMAT_NOT_SUPPORTED); } } - ma_free(pFormatMask); + ma__free_from_callbacks(pFormatMask, &pContext->allocationCallbacks); pFormatMask = NULL; if (((ma_snd_pcm_hw_params_set_format_proc)pContext->alsa.snd_pcm_hw_params_set_format)(pPCM, pHWParams, formatALSA) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Format not supported. snd_pcm_hw_params_set_format() failed.", MA_FORMAT_NOT_SUPPORTED); } internalFormat = ma_format_from_alsa(formatALSA); if (internalFormat == ma_format_unknown) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] The chosen format is not supported by miniaudio.", MA_FORMAT_NOT_SUPPORTED); } @@ -14087,7 +14604,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con { unsigned int channels = (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels; if (((ma_snd_pcm_hw_params_set_channels_near_proc)pContext->alsa.snd_pcm_hw_params_set_channels_near)(pPCM, pHWParams, &channels) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set channel count. 
snd_pcm_hw_params_set_channels_near() failed.", MA_FORMAT_NOT_SUPPORTED); } @@ -14119,7 +14636,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con sampleRate = pConfig->sampleRate; if (((ma_snd_pcm_hw_params_set_rate_near_proc)pContext->alsa.snd_pcm_hw_params_set_rate_near)(pPCM, pHWParams, &sampleRate, 0) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Sample rate not supported. snd_pcm_hw_params_set_rate_near() failed.", MA_FORMAT_NOT_SUPPORTED); } @@ -14134,7 +14651,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con } if (((ma_snd_pcm_hw_params_set_buffer_size_near_proc)pContext->alsa.snd_pcm_hw_params_set_buffer_size_near)(pPCM, pHWParams, &actualBufferSizeInFrames) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set buffer size for device. snd_pcm_hw_params_set_buffer_size() failed.", MA_FORMAT_NOT_SUPPORTED); } @@ -14145,7 +14662,7 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con { ma_uint32 periods = pConfig->periods; if (((ma_snd_pcm_hw_params_set_periods_near_proc)pContext->alsa.snd_pcm_hw_params_set_periods_near)(pPCM, pHWParams, &periods, NULL) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set period count. snd_pcm_hw_params_set_periods_near() failed.", MA_FORMAT_NOT_SUPPORTED); } @@ -14154,30 +14671,30 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con /* Apply hardware parameters. */ if (((ma_snd_pcm_hw_params_proc)pContext->alsa.snd_pcm_hw_params)(pPCM, pHWParams) < 0) { - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set hardware parameters. snd_pcm_hw_params() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE); } - ma_free(pHWParams); + ma__free_from_callbacks(pHWParams, &pContext->allocationCallbacks); pHWParams = NULL; /* Software parameters. */ - pSWParams = (ma_snd_pcm_sw_params_t*)calloc(1, ((ma_snd_pcm_sw_params_sizeof_proc)pContext->alsa.snd_pcm_sw_params_sizeof)()); + pSWParams = (ma_snd_pcm_sw_params_t*)ma__calloc_from_callbacks(((ma_snd_pcm_sw_params_sizeof_proc)pContext->alsa.snd_pcm_sw_params_sizeof)(), &pContext->allocationCallbacks); if (pSWParams == NULL) { ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return MA_OUT_OF_MEMORY; } if (((ma_snd_pcm_sw_params_current_proc)pContext->alsa.snd_pcm_sw_params_current)(pPCM, pSWParams) != 0) { - ma_free(pSWParams); + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to initialize software parameters. 
snd_pcm_sw_params_current() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE); } if (((ma_snd_pcm_sw_params_set_avail_min_proc)pContext->alsa.snd_pcm_sw_params_set_avail_min)(pPCM, pSWParams, ma_prev_power_of_2(internalBufferSizeInFrames/internalPeriods)) != 0) { - ma_free(pSWParams); + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] snd_pcm_sw_params_set_avail_min() failed.", MA_FORMAT_NOT_SUPPORTED); } @@ -14194,24 +14711,24 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con the size of a period. But for full-duplex we need to set it such that it is at least two periods. */ if (((ma_snd_pcm_sw_params_set_start_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_start_threshold)(pPCM, pSWParams, (internalBufferSizeInFrames/internalPeriods)*2) != 0) { - ma_free(pSWParams); + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set start threshold for playback device. snd_pcm_sw_params_set_start_threshold() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE); } if (((ma_snd_pcm_sw_params_set_stop_threshold_proc)pContext->alsa.snd_pcm_sw_params_set_stop_threshold)(pPCM, pSWParams, bufferBoundary) != 0) { /* Set to boundary to loop instead of stop in the event of an xrun. */ - ma_free(pSWParams); + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set stop threshold for playback device. snd_pcm_sw_params_set_stop_threshold() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE); } } if (((ma_snd_pcm_sw_params_proc)pContext->alsa.snd_pcm_sw_params)(pPCM, pSWParams) != 0) { - ma_free(pSWParams); + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); ((ma_snd_pcm_close_proc)pDevice->pContext->alsa.snd_pcm_close)(pPCM); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to set software parameters. snd_pcm_sw_params() failed.", MA_FAILED_TO_CONFIGURE_BACKEND_DEVICE); } - ma_free(pSWParams); + ma__free_from_callbacks(pSWParams, &pContext->allocationCallbacks); pSWParams = NULL; @@ -14300,11 +14817,11 @@ ma_result ma_device_init_by_type__alsa(ma_context* pContext, const ma_device_con return MA_SUCCESS; } -ma_result ma_device_init__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__alsa(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); - ma_zero_object(&pDevice->alsa); + MA_ZERO_OBJECT(&pDevice->alsa); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -14327,73 +14844,12 @@ ma_result ma_device_init__alsa(ma_context* pContext, const ma_device_config* pCo return MA_SUCCESS; } -#if 0 -ma_result ma_device_start__alsa(ma_device* pDevice) -{ - ma_assert(pDevice != NULL); - - /* Prepare the device first... */ - if (((ma_snd_pcm_prepare_proc)pDevice->pContext->alsa.snd_pcm_prepare)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to prepare device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } - - /* - ... and then grab an initial chunk from the client. 
After this is done, the device should - automatically start playing, since that's how we configured the software parameters. - */ - if (pDevice->type == ma_device_type_playback) { - if (!ma_device_read_from_client_and_write__alsa(pDevice)) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to write initial chunk of data to the playback device.", MA_FAILED_TO_SEND_DATA_TO_DEVICE); - } - - /* mmap mode requires an explicit start. */ - if (pDevice->alsa.isUsingMMap) { - if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } - } - } else { - if (((ma_snd_pcm_start_proc)pDevice->pContext->alsa.snd_pcm_start)((ma_snd_pcm_t*)pDevice->alsa.pPCM) < 0) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[ALSA] Failed to start capture device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } - } - - return MA_SUCCESS; -} - -ma_result ma_device_break_main_loop__alsa(ma_device* pDevice) -{ - ma_assert(pDevice != NULL); - - pDevice->alsa.breakFromMainLoop = MA_TRUE; - return MA_SUCCESS; -} - -ma_result ma_device_main_loop__alsa(ma_device* pDevice) -{ - ma_assert(pDevice != NULL); - - pDevice->alsa.breakFromMainLoop = MA_FALSE; - if (pDevice->type == ma_device_type_playback) { - /* Playback. Read from client, write to device. */ - while (!pDevice->alsa.breakFromMainLoop && ma_device_read_from_client_and_write__alsa(pDevice)) { - } - } else { - /* Capture. Read from device, write to client. */ - while (!pDevice->alsa.breakFromMainLoop && ma_device_read_and_send_to_client__alsa(pDevice)) { - } - } - - return MA_SUCCESS; -} -#endif /* 0 */ - -ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 frameCount, ma_uint32* pFramesRead) { ma_snd_pcm_sframes_t resultALSA; - ma_assert(pDevice != NULL); - ma_assert(pFramesOut != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFramesOut != NULL); if (pFramesRead != NULL) { *pFramesRead = 0; @@ -14408,7 +14864,9 @@ ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 f /*printf("TRACE: EGAIN (read)\n");*/ continue; /* Try again. */ } else if (resultALSA == -EPIPE) { - /*printf("TRACE: EPIPE (read)\n");*/ + #if defined(MA_DEBUG_OUTPUT) + printf("TRACE: EPIPE (read)\n"); + #endif /* Overrun. Recover and try again. If this fails we need to return an error. */ if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMCapture, resultALSA, MA_TRUE) < 0) { @@ -14434,12 +14892,12 @@ ma_result ma_device_read__alsa(ma_device* pDevice, void* pFramesOut, ma_uint32 f return MA_SUCCESS; } -ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { ma_snd_pcm_sframes_t resultALSA; - ma_assert(pDevice != NULL); - ma_assert(pFrames != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pFrames != NULL); if (pFramesWritten != NULL) { *pFramesWritten = 0; @@ -14454,7 +14912,9 @@ ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint /*printf("TRACE: EGAIN (write)\n");*/ continue; /* Try again. 
*/ } else if (resultALSA == -EPIPE) { - /*printf("TRACE: EPIPE (write)\n");*/ + #if defined(MA_DEBUG_OUTPUT) + printf("TRACE: EPIPE (write)\n"); + #endif /* Underrun. Recover and try again. If this fails we need to return an error. */ if (((ma_snd_pcm_recover_proc)pDevice->pContext->alsa.snd_pcm_recover)((ma_snd_pcm_t*)pDevice->alsa.pPCMPlayback, resultALSA, MA_TRUE) < 0) { /* MA_TRUE=silent (don't print anything on error). */ @@ -14487,12 +14947,12 @@ ma_result ma_device_write__alsa(ma_device* pDevice, const void* pFrames, ma_uint return MA_SUCCESS; } -ma_result ma_device_main_loop__alsa(ma_device* pDevice) +static ma_result ma_device_main_loop__alsa(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = MA_FALSE; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* Capture devices need to be started immediately. */ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { @@ -14513,77 +14973,88 @@ ma_result ma_device_main_loop__alsa(ma_device* pDevice) /* readi() and writei() */ /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__alsa(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = ma_device_read__alsa(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != 
MA_SUCCESS) {
                                exitLoop = MA_TRUE;
                                break;
                            }
-                            pDevice->capture._dspFrameCount = framesToProcess;
-                            pDevice->capture._dspFrames = capturedDeviceData;
+                            capturedDeviceFramesRemaining = capturedDeviceFramesToProcess;
+                            capturedDeviceFramesProcessed = 0;
                            for (;;) {
-                                ma_uint8 capturedData[8192];
-                                ma_uint8 playbackData[8192];
-                                ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
-                                ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
-
-                                ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames);
-                                ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing);
-                                if (capturedFramesToProcess == 0) {
-                                    break; /* Don't fire the data callback with zero frames. */
+                                ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                                ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE];
+                                ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels);
+                                ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels);
+                                ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames);
+                                ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining;
+                                ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels));
+
+                                /* Convert capture data from device format to client format. */
+                                result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration);
+                                if (result != MA_SUCCESS) {
+                                    break;
+                                }
+
+                                /*
+                                If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small,
+                                which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE.
+                                */
+                                if (capturedClientFramesToProcessThisIteration == 0) {
+                                    break;
                                }
-                                ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess);
+                                ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */
+
+                                capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
+                                capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */
-                                /* At this point the playbackData buffer should be holding data that needs to be written to the device. */
-                                pDevice->playback._dspFrameCount = capturedFramesToProcess;
-                                pDevice->playback._dspFrames = playbackData;
+                                /* At this point the playbackClientData buffer should be holding data that needs to be written to the device.
*/ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__alsa(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__alsa(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__alsa()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } } break; @@ -14597,7 +15068,7 @@ ma_result ma_device_main_loop__alsa(ma_device* pDevice) /* readi() */ /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); ma_uint32 periodSizeInFrames = pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods; ma_uint32 framesReadThisPeriod = 0; @@ -14631,7 +15102,7 @@ ma_result ma_device_main_loop__alsa(ma_device* pDevice) /* writei() */ /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. */ - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); ma_uint32 periodSizeInFrames = pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods; ma_uint32 framesWrittenThisPeriod = 0; @@ -14688,10 +15159,10 @@ ma_result ma_device_main_loop__alsa(ma_device* pDevice) return result; } -ma_result ma_context_uninit__alsa(ma_context* pContext) +static ma_result ma_context_uninit__alsa(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_alsa); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_alsa); /* Clean up memory for memory leak checkers. 
*/ ((ma_snd_config_update_free_global_proc)pContext->alsa.snd_config_update_free_global)(); @@ -14705,7 +15176,7 @@ ma_result ma_context_uninit__alsa(ma_context* pContext) return MA_SUCCESS; } -ma_result ma_context_init__alsa(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__alsa(const ma_context_config* pConfig, ma_context* pContext) { #ifndef MA_NO_RUNTIME_LINKING const char* libasoundNames[] = { @@ -15466,7 +15937,7 @@ typedef struct ma_device_info* pInfo; } ma_pulse_device_enum_data; -ma_result ma_result_from_pulse(int result) +static ma_result ma_result_from_pulse(int result) { switch (result) { case MA_PA_OK: return MA_SUCCESS; @@ -15478,7 +15949,7 @@ ma_result ma_result_from_pulse(int result) } #if 0 -ma_pa_sample_format_t ma_format_to_pulse(ma_format format) +static ma_pa_sample_format_t ma_format_to_pulse(ma_format format) { if (ma_is_little_endian()) { switch (format) { @@ -15506,7 +15977,7 @@ ma_pa_sample_format_t ma_format_to_pulse(ma_format format) } #endif -ma_format ma_format_from_pulse(ma_pa_sample_format_t format) +static ma_format ma_format_from_pulse(ma_pa_sample_format_t format) { if (ma_is_little_endian()) { switch (format) { @@ -15533,7 +16004,7 @@ ma_format ma_format_from_pulse(ma_pa_sample_format_t format) } } -ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position) +static ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position) { switch (position) { @@ -15594,7 +16065,7 @@ ma_channel ma_channel_position_from_pulse(ma_pa_channel_position_t position) } #if 0 -ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position) +static ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position) { switch (position) { @@ -15636,11 +16107,11 @@ ma_pa_channel_position_t ma_channel_position_to_pulse(ma_channel position) } #endif -ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_pa_mainloop* pMainLoop, ma_pa_operation* pOP) +static ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_pa_mainloop* pMainLoop, ma_pa_operation* pOP) { - ma_assert(pContext != NULL); - ma_assert(pMainLoop != NULL); - ma_assert(pOP != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pMainLoop != NULL); + MA_ASSERT(pOP != NULL); while (((ma_pa_operation_get_state_proc)pContext->pulse.pa_operation_get_state)(pOP) == MA_PA_OPERATION_RUNNING) { int error = ((ma_pa_mainloop_iterate_proc)pContext->pulse.pa_mainloop_iterate)(pMainLoop, 1, NULL); @@ -15652,20 +16123,20 @@ ma_result ma_wait_for_operation__pulse(ma_context* pContext, ma_pa_mainloop* pMa return MA_SUCCESS; } -ma_result ma_device__wait_for_operation__pulse(ma_device* pDevice, ma_pa_operation* pOP) +static ma_result ma_device__wait_for_operation__pulse(ma_device* pDevice, ma_pa_operation* pOP) { - ma_assert(pDevice != NULL); - ma_assert(pOP != NULL); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pOP != NULL); return ma_wait_for_operation__pulse(pDevice->pContext, (ma_pa_mainloop*)pDevice->pulse.pMainLoop, pOP); } -ma_bool32 ma_context_is_device_id_equal__pulse(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__pulse(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return ma_strcmp(pID0->pulse, pID1->pulse) == 0; @@ -15680,18 +16151,18 @@ 
typedef struct ma_bool32 isTerminated; } ma_context_enumerate_devices_callback_data__pulse; -void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData) +static void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pSinkInfo, int endOfList, void* pUserData) { ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; ma_device_info deviceInfo; - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); if (endOfList || pData->isTerminated) { return; } - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); /* The name from PulseAudio is the ID for miniaudio. */ if (pSinkInfo->name != NULL) { @@ -15708,18 +16179,18 @@ void ma_context_enumerate_devices_sink_callback__pulse(ma_pa_context* pPulseCont (void)pPulseContext; /* Unused. */ } -void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSinkInfo, int endOfList, void* pUserData) +static void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pSinkInfo, int endOfList, void* pUserData) { ma_context_enumerate_devices_callback_data__pulse* pData = (ma_context_enumerate_devices_callback_data__pulse*)pUserData; ma_device_info deviceInfo; - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); if (endOfList || pData->isTerminated) { return; } - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); /* The name from PulseAudio is the ID for miniaudio. */ if (pSinkInfo->name != NULL) { @@ -15736,7 +16207,7 @@ void ma_context_enumerate_devices_source_callback__pulse(ma_pa_context* pPulseCo (void)pPulseContext; /* Unused. */ } -ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_result result = MA_SUCCESS; ma_context_enumerate_devices_callback_data__pulse callbackData; @@ -15746,8 +16217,8 @@ ma_result ma_context_enumerate_devices__pulse(ma_context* pContext, ma_enum_devi ma_pa_context* pPulseContext; int error; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); callbackData.pContext = pContext; callbackData.callback = callback; @@ -15849,7 +16320,7 @@ typedef struct ma_bool32 foundDevice; } ma_context_get_device_info_callback_data__pulse; -void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +static void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) { ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; @@ -15857,7 +16328,7 @@ void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContex return; } - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); pData->foundDevice = MA_TRUE; if (pInfo->name != NULL) { @@ -15878,7 +16349,7 @@ void ma_context_get_device_info_sink_callback__pulse(ma_pa_context* pPulseContex (void)pPulseContext; /* Unused. 
*/ } -void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +static void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) { ma_context_get_device_info_callback_data__pulse* pData = (ma_context_get_device_info_callback_data__pulse*)pUserData; @@ -15886,7 +16357,7 @@ void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseCont return; } - ma_assert(pData != NULL); + MA_ASSERT(pData != NULL); pData->foundDevice = MA_TRUE; if (pInfo->name != NULL) { @@ -15907,7 +16378,7 @@ void ma_context_get_device_info_source_callback__pulse(ma_pa_context* pPulseCont (void)pPulseContext; /* Unused. */ } -ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_result result = MA_SUCCESS; ma_context_get_device_info_callback_data__pulse callbackData; @@ -15917,7 +16388,7 @@ ma_result ma_context_get_device_info__pulse(ma_context* pContext, ma_device_type ma_pa_context* pPulseContext; int error; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); /* No exclusive mode with the PulseAudio backend. */ if (shareMode == ma_share_mode_exclusive) { @@ -16004,16 +16475,16 @@ done: } -void ma_pulse_device_state_callback(ma_pa_context* pPulseContext, void* pUserData) +static void ma_pulse_device_state_callback(ma_pa_context* pPulseContext, void* pUserData) { ma_device* pDevice; ma_context* pContext; pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); pContext = pDevice->pContext; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); pDevice->pulse.pulseContextState = ((ma_pa_context_get_state_proc)pContext->pulse.pa_context_get_state)(pPulseContext); } @@ -16027,14 +16498,14 @@ void ma_device_sink_info_callback(ma_pa_context* pPulseContext, const ma_pa_sink } pInfoOut = (ma_pa_sink_info*)pUserData; - ma_assert(pInfoOut != NULL); + MA_ASSERT(pInfoOut != NULL); *pInfoOut = *pInfo; (void)pPulseContext; /* Unused. */ } -void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +static void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) { ma_pa_source_info* pInfoOut; @@ -16043,14 +16514,14 @@ void ma_device_source_info_callback(ma_pa_context* pPulseContext, const ma_pa_so } pInfoOut = (ma_pa_source_info*)pUserData; - ma_assert(pInfoOut != NULL); + MA_ASSERT(pInfoOut != NULL); *pInfoOut = *pInfo; (void)pPulseContext; /* Unused. 
*/ } -void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) +static void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink_info* pInfo, int endOfList, void* pUserData) { ma_device* pDevice; @@ -16059,14 +16530,14 @@ void ma_device_sink_name_callback(ma_pa_context* pPulseContext, const ma_pa_sink } pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), pInfo->description, (size_t)-1); (void)pPulseContext; /* Unused. */ } -void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) +static void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_source_info* pInfo, int endOfList, void* pUserData) { ma_device* pDevice; @@ -16075,21 +16546,21 @@ void ma_device_source_name_callback(ma_pa_context* pPulseContext, const ma_pa_so } pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), pInfo->description, (size_t)-1); (void)pPulseContext; /* Unused. */ } -void ma_device_uninit__pulse(ma_device* pDevice) +static void ma_device_uninit__pulse(ma_device* pDevice) { ma_context* pContext; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); pContext = pDevice->pContext; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((ma_pa_stream_disconnect_proc)pContext->pulse.pa_stream_disconnect)((ma_pa_stream*)pDevice->pulse.pStreamCapture); @@ -16105,7 +16576,7 @@ void ma_device_uninit__pulse(ma_device* pDevice) ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)((ma_pa_mainloop*)pDevice->pulse.pMainLoop); } -ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 bufferSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss) +static ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 bufferSizeInFrames, ma_uint32 periods, const ma_pa_sample_spec* ss) { ma_pa_buffer_attr attr; attr.maxlength = bufferSizeInFrames * ma_get_bytes_per_sample(ma_format_from_pulse(ss->format)) * ss->channels; @@ -16117,7 +16588,7 @@ ma_pa_buffer_attr ma_device__pa_buffer_attr_new(ma_uint32 bufferSizeInFrames, ma return attr; } -ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap) +static ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pStreamName, const ma_pa_sample_spec* ss, const ma_pa_channel_map* cmap) { static int g_StreamCounter = 0; char actualStreamName[256]; @@ -16133,7 +16604,7 @@ ma_pa_stream* ma_device__pa_stream_new__pulse(ma_device* pDevice, const char* pS return ((ma_pa_stream_new_proc)pDevice->pContext->pulse.pa_stream_new)((ma_pa_context*)pDevice->pulse.pPulseContext, actualStreamName, ss, cmap); } -ma_result ma_device_init__pulse(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__pulse(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result = MA_SUCCESS; int error = 0; @@ -16152,8 +16623,8 @@ ma_result ma_device_init__pulse(ma_context* pContext, const ma_device_config* pC ma_uint32 iChannel; ma_pa_stream_flags_t streamFlags; - ma_assert(pDevice != NULL); - 
ma_zero_object(&pDevice->pulse); + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->pulse); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -16447,17 +16918,17 @@ on_error0: } -void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData) +static void ma_pulse_operation_complete_callback(ma_pa_stream* pStream, int success, void* pUserData) { ma_bool32* pIsSuccessful = (ma_bool32*)pUserData; - ma_assert(pIsSuccessful != NULL); + MA_ASSERT(pIsSuccessful != NULL); *pIsSuccessful = (ma_bool32)success; (void)pStream; /* Unused. */ } -ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork) +static ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type deviceType, int cork) { ma_context* pContext = pDevice->pContext; ma_bool32 wasSuccessful; @@ -16473,7 +16944,7 @@ ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type devic wasSuccessful = MA_FALSE; pStream = (ma_pa_stream*)((deviceType == ma_device_type_capture) ? pDevice->pulse.pStreamCapture : pDevice->pulse.pStreamPlayback); - ma_assert(pStream != NULL); + MA_ASSERT(pStream != NULL); pOP = ((ma_pa_stream_cork_proc)pContext->pulse.pa_stream_cork)(pStream, cork, ma_pulse_operation_complete_callback, &wasSuccessful); if (pOP == NULL) { @@ -16498,13 +16969,13 @@ ma_result ma_device__cork_stream__pulse(ma_device* pDevice, ma_device_type devic return MA_SUCCESS; } -ma_result ma_device_stop__pulse(ma_device* pDevice) +static ma_result ma_device_stop__pulse(ma_device* pDevice) { ma_result result; ma_bool32 wasSuccessful; ma_pa_operation* pOP; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { result = ma_device__cork_stream__pulse(pDevice, ma_device_type_capture, 1); @@ -16530,13 +17001,13 @@ ma_result ma_device_stop__pulse(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { ma_uint32 totalFramesWritten; - ma_assert(pDevice != NULL); - ma_assert(pPCMFrames != NULL); - ma_assert(frameCount > 0); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + MA_ASSERT(frameCount > 0); if (pFramesWritten != NULL) { *pFramesWritten = 0; @@ -16556,7 +17027,7 @@ ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_ void* pDst = (ma_uint8*)pDevice->pulse.pMappedBufferPlayback + (mappedBufferFramesConsumed * bpf); const void* pSrc = (const ma_uint8*)pPCMFrames + (totalFramesWritten * bpf); ma_uint32 framesToCopy = ma_min(pDevice->pulse.mappedBufferFramesRemainingPlayback, (frameCount - totalFramesWritten)); - ma_copy_memory(pDst, pSrc, framesToCopy * bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf); pDevice->pulse.mappedBufferFramesRemainingPlayback -= framesToCopy; totalFramesWritten += framesToCopy; @@ -16579,7 +17050,7 @@ ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_ pDevice->pulse.mappedBufferFramesCapacityPlayback = 0; } - ma_assert(totalFramesWritten <= frameCount); + MA_ASSERT(totalFramesWritten <= frameCount); if (totalFramesWritten == frameCount) { break; } @@ -16630,13 +17101,13 @@ ma_result ma_device_write__pulse(ma_device* pDevice, const void* pPCMFrames, ma_ return 
MA_SUCCESS; } -ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) { ma_uint32 totalFramesRead; - ma_assert(pDevice != NULL); - ma_assert(pPCMFrames != NULL); - ma_assert(frameCount > 0); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(pPCMFrames != NULL); + MA_ASSERT(frameCount > 0); if (pFramesRead != NULL) { *pFramesRead = 0; @@ -16648,8 +17119,11 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 return MA_DEVICE_NOT_STARTED; } - /* If a buffer is mapped we need to write to that first. Once it's consumed we reset the event and unmap it. */ - if (pDevice->pulse.pMappedBufferCapture != NULL && pDevice->pulse.mappedBufferFramesRemainingCapture > 0) { + /* + If a buffer is mapped we need to read from that first. Once it's consumed we need to drop it. Note that pDevice->pulse.pMappedBufferCapture can be null in which + case it could be a hole. In this case we just write zeros into the output buffer. + */ + if (pDevice->pulse.mappedBufferFramesRemainingCapture > 0) { ma_uint32 bpf = ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); ma_uint32 mappedBufferFramesConsumed = pDevice->pulse.mappedBufferFramesCapacityCapture - pDevice->pulse.mappedBufferFramesRemainingCapture; @@ -16662,9 +17136,12 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 */ if (pDevice->pulse.pMappedBufferCapture != NULL) { const void* pSrc = (const ma_uint8*)pDevice->pulse.pMappedBufferCapture + (mappedBufferFramesConsumed * bpf); - ma_copy_memory(pDst, pSrc, framesToCopy * bpf); + MA_COPY_MEMORY(pDst, pSrc, framesToCopy * bpf); } else { - ma_zero_memory(pDst, framesToCopy * bpf); + MA_ZERO_MEMORY(pDst, framesToCopy * bpf); + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Filling hole with silence.\n"); + #endif } pDevice->pulse.mappedBufferFramesRemainingCapture -= framesToCopy; @@ -16676,7 +17153,13 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 mapping another chunk. If this fails we need to wait for data to become available. */ if (pDevice->pulse.mappedBufferFramesCapacityCapture > 0 && pDevice->pulse.mappedBufferFramesRemainingCapture == 0) { - int error = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)((ma_pa_stream*)pDevice->pulse.pStreamCapture); + int error; + + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_drop()\n"); + #endif + + error = ((ma_pa_stream_drop_proc)pDevice->pContext->pulse.pa_stream_drop)((ma_pa_stream*)pDevice->pulse.pStreamCapture); if (error != 0) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to drop fragment.", ma_result_from_pulse(error)); } @@ -16686,14 +17169,15 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 pDevice->pulse.mappedBufferFramesCapacityCapture = 0; } - ma_assert(totalFramesRead <= frameCount); + MA_ASSERT(totalFramesRead <= frameCount); if (totalFramesRead == frameCount) { break; } /* Getting here means we need to map a new buffer. If we don't have enough data we wait for more. 
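[Editor's note] The reworked capture read above drops the pa_stream_readable_size() check in favour of a straight pa_stream_peek()/pa_stream_drop() cycle and explicitly handles holes (a NULL mapped pointer with a non-zero size) by writing silence. Below is a minimal, self-contained sketch of that pattern using the raw libpulse API rather than miniaudio's dynamically loaded wrappers; the helper name and the simplified byte handling are illustrative assumptions, not part of the patch.

#include <pulse/pulseaudio.h>
#include <string.h>

/*
Reads at most outCap bytes from one mapped fragment of a capture stream.
Returns the number of bytes written to pOut, or 0 if nothing was available.
*/
static size_t read_one_fragment(pa_stream* pStream, void* pOut, size_t outCap)
{
    const void* pMapped = NULL;
    size_t mappedBytes = 0;
    size_t bytesToCopy;

    if (pa_stream_peek(pStream, &pMapped, &mappedBytes) < 0) {
        return 0;   /* Peek failed. */
    }

    if (mappedBytes == 0) {
        return 0;   /* No data yet. pa_stream_drop() must not be called in this case. */
    }

    bytesToCopy = (mappedBytes < outCap) ? mappedBytes : outCap;
    if (pMapped != NULL) {
        memcpy(pOut, pMapped, bytesToCopy);
    } else {
        memset(pOut, 0, bytesToCopy);   /* A hole: fill the output with silence, as the patch does. */
    }

    /*
    The fragment must be dropped once consumed, holes included. A real implementation
    would retain any bytes beyond outCap before dropping, as the device code above does
    with its mappedBufferFramesRemainingCapture counter.
    */
    pa_stream_drop(pStream);
    return bytesToCopy;
}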
*/ for (;;) { - size_t readableSizeInBytes; + int error; + size_t bytesMapped; if (ma_device__get_state(pDevice) != MA_STATE_STARTED) { break; @@ -16701,38 +17185,45 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 /* If the device has been corked, don't try to continue. */ if (((ma_pa_stream_is_corked_proc)pDevice->pContext->pulse.pa_stream_is_corked)((ma_pa_stream*)pDevice->pulse.pStreamCapture)) { + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Corked.\n"); + #endif break; } - readableSizeInBytes = ((ma_pa_stream_readable_size_proc)pDevice->pContext->pulse.pa_stream_readable_size)((ma_pa_stream*)pDevice->pulse.pStreamCapture); - if (readableSizeInBytes != (size_t)-1) { - /*size_t periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels);*/ - if (readableSizeInBytes > 0) { - /* Data is avaialable. */ - size_t bytesMapped = (size_t)-1; - int error = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &pDevice->pulse.pMappedBufferCapture, &bytesMapped); - if (error < 0) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to peek capture buffer.", ma_result_from_pulse(error)); - } + MA_ASSERT(pDevice->pulse.pMappedBufferCapture == NULL); /* <-- We're about to map a buffer which means we shouldn't have an existing mapping. */ - if (pDevice->pulse.pMappedBufferCapture == NULL && bytesMapped == 0) { - /* Nothing available. This shouldn't happen because we checked earlier with pa_stream_readable_size(). I'm going to throw an error in this case. */ - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Nothing available after peeking capture buffer.", MA_ERROR); - } + error = ((ma_pa_stream_peek_proc)pDevice->pContext->pulse.pa_stream_peek)((ma_pa_stream*)pDevice->pulse.pStreamCapture, &pDevice->pulse.pMappedBufferCapture, &bytesMapped); + if (error < 0) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to peek capture buffer.", ma_result_from_pulse(error)); + } - pDevice->pulse.mappedBufferFramesCapacityCapture = bytesMapped / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - pDevice->pulse.mappedBufferFramesRemainingCapture = pDevice->pulse.mappedBufferFramesCapacityCapture; + if (bytesMapped > 0) { + pDevice->pulse.mappedBufferFramesCapacityCapture = bytesMapped / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pDevice->pulse.mappedBufferFramesRemainingCapture = pDevice->pulse.mappedBufferFramesCapacityCapture; - break; - } else { - /* No data available. Need to wait for more. */ + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Mapped. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCapture); + #endif + + if (pDevice->pulse.pMappedBufferCapture == NULL) { + /* It's a hole. */ + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: Call pa_stream_peek(). Hole.\n"); + #endif + } + + break; + } else { + if (pDevice->pulse.pMappedBufferCapture == NULL) { + /* Nothing available yet. Need to wait for more. */ /* I have had reports of a deadlock in this part of the code. 
I have reproduced this when using the "Built-in Audio Analogue Stereo" device without an actual microphone connected. I'm experimenting here by not blocking in pa_mainloop_iterate() and instead sleep for a bit when there are no dispatches. */ - int error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 0, NULL); + error = ((ma_pa_mainloop_iterate_proc)pDevice->pContext->pulse.pa_mainloop_iterate)((ma_pa_mainloop*)pDevice->pulse.pMainLoop, 0, NULL); if (error < 0) { return ma_result_from_pulse(error); } @@ -16742,10 +17233,13 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 ma_sleep(1); } - continue; + #if defined(MA_DEBUG_OUTPUT) + printf("[PulseAudio] ma_device_read__pulse: No data available. Waiting. mappedBufferFramesCapacityCapture=%d, mappedBufferFramesRemainingCapture=%d\n", pDevice->pulse.mappedBufferFramesCapacityCapture, pDevice->pulse.mappedBufferFramesRemainingCapture); + #endif + } else { + /* Getting here means we mapped 0 bytes, but have a non-NULL buffer. I don't think this should ever happen. */ + MA_ASSERT(MA_FALSE); } - } else { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[PulseAudio] Failed to query the stream's readable size.", MA_ERROR); } } } @@ -16757,12 +17251,12 @@ ma_result ma_device_read__pulse(ma_device* pDevice, void* pPCMFrames, ma_uint32 return MA_SUCCESS; } -ma_result ma_device_main_loop__pulse(ma_device* pDevice) +static ma_result ma_device_main_loop__pulse(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = MA_FALSE; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* The stream needs to be uncorked first. We do this at the top for both capture and playback for PulseAudio. 
*/ if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { @@ -16785,83 +17279,94 @@ ma_result ma_device_main_loop__pulse(ma_device* pDevice) case ma_device_type_duplex: { /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__pulse(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = ma_device_read__pulse(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - pDevice->capture._dspFrameCount = framesToProcess; - pDevice->capture._dspFrames = capturedDeviceData; + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; for (;;) { - ma_uint8 capturedData[8192]; - ma_uint8 playbackData[8192]; - ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames); - ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing); - 
if (capturedFramesToProcess == 0) { - break; /* Don't fire the data callback with zero frames. */ + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhaused all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; } - ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess); + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast .*/ + + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ - /* At this point the playbackData buffer should be holding data that needs to be written to the device. */ - pDevice->playback._dspFrameCount = capturedFramesToProcess; - pDevice->playback._dspFrames = playbackData; + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__pulse(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__pulse(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. 
*/ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__pulse()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } break; case ma_device_type_capture: { - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); ma_uint32 periodSizeInFrames = pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods; ma_uint32 framesReadThisPeriod = 0; @@ -16887,7 +17392,7 @@ ma_result ma_device_main_loop__pulse(ma_device* pDevice) case ma_device_type_playback: { - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); ma_uint32 periodSizeInFrames = pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods; ma_uint32 framesWrittenThisPeriod = 0; @@ -16924,15 +17429,15 @@ ma_result ma_device_main_loop__pulse(ma_device* pDevice) } -ma_result ma_context_uninit__pulse(ma_context* pContext) +static ma_result ma_context_uninit__pulse(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_pulseaudio); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_pulseaudio); - ma_free(pContext->pulse.pServerName); + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); pContext->pulse.pServerName = NULL; - ma_free(pContext->pulse.pApplicationName); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); pContext->pulse.pApplicationName = NULL; #ifndef MA_NO_RUNTIME_LINKING @@ -16942,7 +17447,7 @@ ma_result ma_context_uninit__pulse(ma_context* pContext) return MA_SUCCESS; } -ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* pContext) { #ifndef MA_NO_RUNTIME_LINKING const char* libpulseNames[] = { @@ -17110,10 +17615,10 @@ ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* p pContext->onDeviceMainLoop = ma_device_main_loop__pulse; if (pConfig->pulse.pApplicationName) { - pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName); + pContext->pulse.pApplicationName = ma_copy_string(pConfig->pulse.pApplicationName, &pContext->allocationCallbacks); } if (pConfig->pulse.pServerName) { - pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName); + pContext->pulse.pServerName = ma_copy_string(pConfig->pulse.pServerName, &pContext->allocationCallbacks); } pContext->pulse.tryAutoSpawn = pConfig->pulse.tryAutoSpawn; @@ -17129,8 +17634,8 @@ ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* p pMainLoop = ((ma_pa_mainloop_new_proc)pContext->pulse.pa_mainloop_new)(); if (pMainLoop == NULL) { - ma_free(pContext->pulse.pServerName); - ma_free(pContext->pulse.pApplicationName); + ma_free(pContext->pulse.pServerName, 
&pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); #ifndef MA_NO_RUNTIME_LINKING ma_dlclose(pContext, pContext->pulse.pulseSO); #endif @@ -17139,8 +17644,8 @@ ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* p pAPI = ((ma_pa_mainloop_get_api_proc)pContext->pulse.pa_mainloop_get_api)(pMainLoop); if (pAPI == NULL) { - ma_free(pContext->pulse.pServerName); - ma_free(pContext->pulse.pApplicationName); + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); #ifndef MA_NO_RUNTIME_LINKING ma_dlclose(pContext, pContext->pulse.pulseSO); @@ -17150,8 +17655,8 @@ ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* p pPulseContext = ((ma_pa_context_new_proc)pContext->pulse.pa_context_new)(pAPI, pContext->pulse.pApplicationName); if (pPulseContext == NULL) { - ma_free(pContext->pulse.pServerName); - ma_free(pContext->pulse.pApplicationName); + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); #ifndef MA_NO_RUNTIME_LINKING ma_dlclose(pContext, pContext->pulse.pulseSO); @@ -17161,8 +17666,8 @@ ma_result ma_context_init__pulse(const ma_context_config* pConfig, ma_context* p error = ((ma_pa_context_connect_proc)pContext->pulse.pa_context_connect)(pPulseContext, pContext->pulse.pServerName, 0, NULL); if (error != MA_PA_OK) { - ma_free(pContext->pulse.pServerName); - ma_free(pContext->pulse.pApplicationName); + ma_free(pContext->pulse.pServerName, &pContext->allocationCallbacks); + ma_free(pContext->pulse.pApplicationName, &pContext->allocationCallbacks); ((ma_pa_context_unref_proc)pContext->pulse.pa_context_unref)(pPulseContext); ((ma_pa_mainloop_free_proc)pContext->pulse.pa_mainloop_free)(pMainLoop); #ifndef MA_NO_RUNTIME_LINKING @@ -17238,15 +17743,15 @@ typedef const char* (* ma_jack_port_name_proc) (const ma_jac typedef void* (* ma_jack_port_get_buffer_proc) (ma_jack_port_t* port, ma_jack_nframes_t nframes); typedef void (* ma_jack_free_proc) (void* ptr); -ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient) +static ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** ppClient) { size_t maxClientNameSize; char clientName[256]; ma_jack_status_t status; ma_jack_client_t* pClient; - ma_assert(pContext != NULL); - ma_assert(ppClient != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppClient != NULL); if (ppClient) { *ppClient = NULL; @@ -17267,27 +17772,27 @@ ma_result ma_context_open_client__jack(ma_context* pContext, ma_jack_client_t** return MA_SUCCESS; } -ma_bool32 ma_context_is_device_id_equal__jack(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__jack(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return pID0->jack == pID1->jack; } -ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result 
ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_bool32 cbResult = MA_TRUE; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); /* Playback. */ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); } @@ -17295,7 +17800,7 @@ ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devic /* Capture. */ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); } @@ -17303,13 +17808,13 @@ ma_result ma_context_enumerate_devices__jack(ma_context* pContext, ma_enum_devic return MA_SUCCESS; } -ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_jack_client_t* pClient; ma_result result; const char** ppPorts; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); /* No exclusive mode with the JACK backend. */ if (shareMode == ma_share_mode_exclusive) { @@ -17343,7 +17848,7 @@ ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type pDeviceInfo->minChannels = 0; pDeviceInfo->maxChannels = 0; - ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, NULL, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? ma_JackPortIsInput : ma_JackPortIsOutput)); + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ((deviceType == ma_device_type_playback) ? 
ma_JackPortIsInput : ma_JackPortIsOutput)); if (ppPorts == NULL) { ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pClient); return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); @@ -17362,25 +17867,25 @@ ma_result ma_context_get_device_info__jack(ma_context* pContext, ma_device_type } -void ma_device_uninit__jack(ma_device* pDevice) +static void ma_device_uninit__jack(ma_device* pDevice) { ma_context* pContext; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); pContext = pDevice->pContext; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (pDevice->jack.pClient != NULL) { ((ma_jack_client_close_proc)pContext->jack.jack_client_close)((ma_jack_client_t*)pDevice->jack.pClient); } if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - ma_free(pDevice->jack.pIntermediaryBufferCapture); + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks); } if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - ma_free(pDevice->jack.pIntermediaryBufferPlayback); + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferPlayback, &pDevice->pContext->allocationCallbacks); } if (pDevice->type == ma_device_type_duplex) { @@ -17388,36 +17893,42 @@ void ma_device_uninit__jack(ma_device* pDevice) } } -void ma_device__jack_shutdown_callback(void* pUserData) +static void ma_device__jack_shutdown_callback(void* pUserData) { /* JACK died. Stop the device. */ ma_device* pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); ma_device_stop(pDevice); } -int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData) +static int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUserData) { ma_device* pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - float* pNewBuffer = (float*)ma_realloc(pDevice->jack.pIntermediaryBufferCapture, frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat))); + size_t newBufferSize = frameCount * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); + float* pNewBuffer = (float*)ma__calloc_from_callbacks(newBufferSize, &pDevice->pContext->allocationCallbacks); if (pNewBuffer == NULL) { return MA_OUT_OF_MEMORY; } + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferCapture, &pDevice->pContext->allocationCallbacks); + pDevice->jack.pIntermediaryBufferCapture = pNewBuffer; pDevice->playback.internalBufferSizeInFrames = frameCount * pDevice->capture.internalPeriods; } if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - float* pNewBuffer = (float*)ma_realloc(pDevice->jack.pIntermediaryBufferPlayback, frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat))); + size_t newBufferSize = frameCount * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + float* pNewBuffer = (float*)ma__calloc_from_callbacks(newBufferSize, &pDevice->pContext->allocationCallbacks); if (pNewBuffer == NULL) { return MA_OUT_OF_MEMORY; } + ma__free_from_callbacks(pDevice->jack.pIntermediaryBufferPlayback, 
&pDevice->pContext->allocationCallbacks); + pDevice->jack.pIntermediaryBufferPlayback = pNewBuffer; pDevice->playback.internalBufferSizeInFrames = frameCount * pDevice->playback.internalPeriods; } @@ -17425,17 +17936,17 @@ int ma_device__jack_buffer_size_callback(ma_jack_nframes_t frameCount, void* pUs return 0; } -int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData) +static int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserData) { ma_device* pDevice; ma_context* pContext; ma_uint32 iChannel; pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); pContext = pDevice->pContext; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { /* Channels need to be interleaved. */ @@ -17486,15 +17997,15 @@ int ma_device__jack_process_callback(ma_jack_nframes_t frameCount, void* pUserDa return 0; } -ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result; ma_uint32 periods; ma_uint32 bufferSizeInFrames; - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -17541,7 +18052,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo pDevice->capture.internalSampleRate = ((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); - ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsOutput); + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); if (ppPorts == NULL) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); } @@ -17566,7 +18077,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo pDevice->capture.internalBufferSizeInFrames = bufferSizeInFrames; pDevice->capture.internalPeriods = periods; - pDevice->jack.pIntermediaryBufferCapture = (float*)ma_malloc((pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods) * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat))); + pDevice->jack.pIntermediaryBufferCapture = (float*)ma__calloc_from_callbacks((pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods) * (pDevice->capture.internalChannels * ma_get_bytes_per_sample(pDevice->capture.internalFormat)), &pContext->allocationCallbacks); if (pDevice->jack.pIntermediaryBufferCapture == NULL) { ma_device_uninit__jack(pDevice); return MA_OUT_OF_MEMORY; @@ -17581,7 +18092,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo pDevice->playback.internalSampleRate = 
((ma_jack_get_sample_rate_proc)pContext->jack.jack_get_sample_rate)((ma_jack_client_t*)pDevice->jack.pClient); ma_get_standard_channel_map(ma_standard_channel_map_alsa, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); - ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsInput); + ppPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); if (ppPorts == NULL) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to query physical ports.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); } @@ -17606,7 +18117,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo pDevice->playback.internalBufferSizeInFrames = bufferSizeInFrames; pDevice->playback.internalPeriods = periods; - pDevice->jack.pIntermediaryBufferPlayback = (float*)ma_malloc((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods) * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat))); + pDevice->jack.pIntermediaryBufferPlayback = (float*)ma__calloc_from_callbacks((pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods) * (pDevice->playback.internalChannels * ma_get_bytes_per_sample(pDevice->playback.internalFormat)), &pContext->allocationCallbacks); if (pDevice->jack.pIntermediaryBufferPlayback == NULL) { ma_device_uninit__jack(pDevice); return MA_OUT_OF_MEMORY; @@ -17615,7 +18126,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo if (pDevice->type == ma_device_type_duplex) { ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames); - result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->jack.duplexRB); + result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->jack.duplexRB); if (result != MA_SUCCESS) { ma_device_uninit__jack(pDevice); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to initialize ring buffer.", result); @@ -17627,7 +18138,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo void* pMarginData; ma_pcm_rb_acquire_write(&pDevice->jack.duplexRB, &marginSizeInFrames, &pMarginData); { - ma_zero_memory(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); } ma_pcm_rb_commit_write(&pDevice->jack.duplexRB, marginSizeInFrames, pMarginData); } @@ -17637,7 +18148,7 @@ ma_result ma_device_init__jack(ma_context* pContext, const ma_device_config* pCo } -ma_result ma_device_start__jack(ma_device* pDevice) +static ma_result ma_device_start__jack(ma_device* pDevice) { ma_context* pContext = pDevice->pContext; int resultJACK; @@ -17649,7 +18160,7 @@ ma_result ma_device_start__jack(ma_device* pDevice) } if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, 
ma_JackPortIsPhysical | ma_JackPortIsOutput); + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsOutput); if (ppServerPorts == NULL) { ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR); @@ -17671,7 +18182,7 @@ ma_result ma_device_start__jack(ma_device* pDevice) } if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, NULL, ma_JackPortIsPhysical | ma_JackPortIsInput); + const char** ppServerPorts = ((ma_jack_get_ports_proc)pContext->jack.jack_get_ports)((ma_jack_client_t*)pDevice->jack.pClient, NULL, MA_JACK_DEFAULT_AUDIO_TYPE, ma_JackPortIsPhysical | ma_JackPortIsInput); if (ppServerPorts == NULL) { ((ma_jack_deactivate_proc)pContext->jack.jack_deactivate)((ma_jack_client_t*)pDevice->jack.pClient); return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[JACK] Failed to retrieve physical ports.", MA_ERROR); @@ -17695,7 +18206,7 @@ ma_result ma_device_start__jack(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_stop__jack(ma_device* pDevice) +static ma_result ma_device_stop__jack(ma_device* pDevice) { ma_context* pContext = pDevice->pContext; ma_stop_proc onStop; @@ -17713,12 +18224,12 @@ ma_result ma_device_stop__jack(ma_device* pDevice) } -ma_result ma_context_uninit__jack(ma_context* pContext) +static ma_result ma_context_uninit__jack(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_jack); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_jack); - ma_free(pContext->jack.pClientName); + ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks); pContext->jack.pClientName = NULL; #ifndef MA_NO_RUNTIME_LINKING @@ -17728,7 +18239,7 @@ ma_result ma_context_uninit__jack(ma_context* pContext) return MA_SUCCESS; } -ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pContext) { #ifndef MA_NO_RUNTIME_LINKING const char* libjackNames[] = { @@ -17820,7 +18331,7 @@ ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pC pContext->onDeviceStop = ma_device_stop__jack; if (pConfig->jack.pClientName != NULL) { - pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName); + pContext->jack.pClientName = ma_copy_string(pConfig->jack.pClientName, &pContext->allocationCallbacks); } pContext->jack.tryStartServer = pConfig->jack.tryStartServer; @@ -17832,7 +18343,7 @@ ma_result ma_context_init__jack(const ma_context_config* pConfig, ma_context* pC ma_jack_client_t* pDummyClient; ma_result result = ma_context_open_client__jack(pContext, &pDummyClient); if (result != MA_SUCCESS) { - ma_free(pContext->jack.pClientName); + ma_free(pContext->jack.pClientName, &pContext->allocationCallbacks); #ifndef MA_NO_RUNTIME_LINKING ma_dlclose(pContext, pContext->jack.jackSO); #endif @@ -17858,6 +18369,12 @@ Core Audio Backend #if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE == 1 #define MA_APPLE_MOBILE + #if defined(TARGET_OS_TV) && TARGET_OS_TV == 1 + #define MA_APPLE_TV + #endif + #if defined(TARGET_OS_WATCH) 
&& TARGET_OS_WATCH == 1 + #define MA_APPLE_WATCH + #endif #else #define MA_APPLE_DESKTOP #endif @@ -17900,7 +18417,7 @@ typedef OSStatus (* ma_AudioUnitRender_proc)(AudioUnit inUnit, AudioUnitRenderAc #define MA_COREAUDIO_OUTPUT_BUS 0 #define MA_COREAUDIO_INPUT_BUS 1 -ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit); +static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit); /* Core Audio @@ -17935,7 +18452,7 @@ size, allocate a block of memory of that size and then call AudioObjectGetProper AudioDeviceID's so just do "dataSize/sizeof(AudioDeviceID)" to know the device count. */ -ma_result ma_result_from_OSStatus(OSStatus status) +static ma_result ma_result_from_OSStatus(OSStatus status) { switch (status) { @@ -17958,7 +18475,7 @@ ma_result ma_result_from_OSStatus(OSStatus status) } #if 0 -ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit) +static ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit) { switch (bit) { @@ -17985,7 +18502,7 @@ ma_channel ma_channel_from_AudioChannelBitmap(AudioChannelBitmap bit) } #endif -ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label) +static ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label) { switch (label) { @@ -18079,10 +18596,10 @@ ma_channel ma_channel_from_AudioChannelLabel(AudioChannelLabel label) } } -ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut) +static ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescription* pDescription, ma_format* pFormatOut) { - ma_assert(pDescription != NULL); - ma_assert(pFormatOut != NULL); + MA_ASSERT(pDescription != NULL); + MA_ASSERT(pFormatOut != NULL); *pFormatOut = ma_format_unknown; /* Safety. */ @@ -18144,9 +18661,9 @@ ma_result ma_format_from_AudioStreamBasicDescription(const AudioStreamBasicDescr return MA_FORMAT_NOT_SUPPORTED; } -ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel channelMap[MA_MAX_CHANNELS]) +static ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_assert(pChannelLayout != NULL); + MA_ASSERT(pChannelLayout != NULL); if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { UInt32 iChannel; @@ -18223,16 +18740,16 @@ ma_result ma_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChanne #if defined(MA_APPLE_DESKTOP) -ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */ +static ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) /* NOTE: Free the returned buffer with ma_free(). */ { AudioObjectPropertyAddress propAddressDevices; UInt32 deviceObjectsDataSize; OSStatus status; AudioObjectID* pDeviceObjectIDs; - ma_assert(pContext != NULL); - ma_assert(pDeviceCount != NULL); - ma_assert(ppDeviceObjectIDs != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceCount != NULL); + MA_ASSERT(ppDeviceObjectIDs != NULL); /* Safety. 
*/ *pDeviceCount = 0; @@ -18247,31 +18764,30 @@ ma_result ma_get_device_object_ids__coreaudio(ma_context* pContext, UInt32* pDev return ma_result_from_OSStatus(status); } - pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize); + pDeviceObjectIDs = (AudioObjectID*)ma_malloc(deviceObjectsDataSize, &pContext->allocationCallbacks); if (pDeviceObjectIDs == NULL) { return MA_OUT_OF_MEMORY; } status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(kAudioObjectSystemObject, &propAddressDevices, 0, NULL, &deviceObjectsDataSize, pDeviceObjectIDs); if (status != noErr) { - ma_free(pDeviceObjectIDs); + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); return ma_result_from_OSStatus(status); } *pDeviceCount = deviceObjectsDataSize / sizeof(AudioObjectID); *ppDeviceObjectIDs = pDeviceObjectIDs; - (void)pContext; /* Unused. */ return MA_SUCCESS; } -ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID) +static ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjectID objectID, CFStringRef* pUID) { AudioObjectPropertyAddress propAddress; UInt32 dataSize; OSStatus status; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); propAddress.mSelector = kAudioDevicePropertyDeviceUID; propAddress.mScope = kAudioObjectPropertyScopeGlobal; @@ -18286,12 +18802,12 @@ ma_result ma_get_AudioObject_uid_as_CFStringRef(ma_context* pContext, AudioObjec return MA_SUCCESS; } -ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) +static ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) { CFStringRef uid; ma_result result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); result = ma_get_AudioObject_uid_as_CFStringRef(pContext, objectID, &uid); if (result != MA_SUCCESS) { @@ -18306,14 +18822,14 @@ ma_result ma_get_AudioObject_uid(ma_context* pContext, AudioObjectID objectID, s return MA_SUCCESS; } -ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) +static ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, size_t bufferSize, char* bufferOut) { AudioObjectPropertyAddress propAddress; CFStringRef deviceName = NULL; UInt32 dataSize; OSStatus status; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); propAddress.mSelector = kAudioDevicePropertyDeviceNameCFString; propAddress.mScope = kAudioObjectPropertyScopeGlobal; @@ -18333,7 +18849,7 @@ ma_result ma_get_AudioObject_name(ma_context* pContext, AudioObjectID objectID, return MA_SUCCESS; } -ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope) +static ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID deviceObjectID, AudioObjectPropertyScope scope) { AudioObjectPropertyAddress propAddress; UInt32 dataSize; @@ -18341,7 +18857,7 @@ ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID AudioBufferList* pBufferList; ma_bool32 isSupported; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); /* To know whether or not a device is an input device we need ot look at the stream configuration. If it has an output channel it's a playback device. 
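[Editor's note] ma_does_AudioObject_support_scope() above uses the standard Core Audio two-call pattern: query the property size with AudioObjectGetPropertyDataSize(), allocate a buffer of that size, then fetch it with AudioObjectGetPropertyData() and inspect mNumberBuffers. A stripped-down sketch of the same check follows; the helper name and the use of plain malloc/free instead of the allocation callbacks the patch threads through are illustrative assumptions.

#include <CoreAudio/CoreAudio.h>
#include <stdlib.h>

static Boolean device_has_streams_in_scope(AudioObjectID deviceID, AudioObjectPropertyScope scope)
{
    AudioObjectPropertyAddress addr;
    UInt32 dataSize = 0;
    AudioBufferList* pList;
    Boolean hasStreams = false;

    addr.mSelector = kAudioDevicePropertyStreamConfiguration;
    addr.mScope    = scope;   /* kAudioObjectPropertyScopeOutput or kAudioObjectPropertyScopeInput. */
    addr.mElement  = kAudioObjectPropertyElementMaster;

    /* First call: how big is the AudioBufferList for this device/scope? */
    if (AudioObjectGetPropertyDataSize(deviceID, &addr, 0, NULL, &dataSize) != noErr) {
        return false;
    }

    pList = (AudioBufferList*)malloc(dataSize);
    if (pList == NULL) {
        return false;
    }

    /* Second call: fetch the stream configuration itself. */
    if (AudioObjectGetPropertyData(deviceID, &addr, 0, NULL, &dataSize, pList) == noErr) {
        hasStreams = (pList->mNumberBuffers > 0);   /* Any buffer in this scope means the device supports it. */
    }

    free(pList);
    return hasStreams;
}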
*/ propAddress.mSelector = kAudioDevicePropertyStreamConfiguration; @@ -18353,14 +18869,14 @@ ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID return MA_FALSE; } - pBufferList = (AudioBufferList*)ma_malloc(dataSize); + pBufferList = (AudioBufferList*)ma__malloc_from_callbacks(dataSize, &pContext->allocationCallbacks); if (pBufferList == NULL) { return MA_FALSE; /* Out of memory. */ } status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pBufferList); if (status != noErr) { - ma_free(pBufferList); + ma__free_from_callbacks(pBufferList, &pContext->allocationCallbacks); return MA_FALSE; } @@ -18369,31 +18885,31 @@ ma_bool32 ma_does_AudioObject_support_scope(ma_context* pContext, AudioObjectID isSupported = MA_TRUE; } - ma_free(pBufferList); + ma__free_from_callbacks(pBufferList, &pContext->allocationCallbacks); return isSupported; } -ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID) +static ma_bool32 ma_does_AudioObject_support_playback(ma_context* pContext, AudioObjectID deviceObjectID) { return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeOutput); } -ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID) +static ma_bool32 ma_does_AudioObject_support_capture(ma_context* pContext, AudioObjectID deviceObjectID) { return ma_does_AudioObject_support_scope(pContext, deviceObjectID, kAudioObjectPropertyScopeInput); } -ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */ +static ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pDescriptionCount, AudioStreamRangedDescription** ppDescriptions) /* NOTE: Free the returned pointer with ma_free(). */ { AudioObjectPropertyAddress propAddress; UInt32 dataSize; OSStatus status; AudioStreamRangedDescription* pDescriptions; - ma_assert(pContext != NULL); - ma_assert(pDescriptionCount != NULL); - ma_assert(ppDescriptions != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDescriptionCount != NULL); + MA_ASSERT(ppDescriptions != NULL); /* TODO: Experiment with kAudioStreamPropertyAvailablePhysicalFormats instead of (or in addition to) kAudioStreamPropertyAvailableVirtualFormats. 
My @@ -18408,14 +18924,14 @@ ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObje return ma_result_from_OSStatus(status); } - pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize); + pDescriptions = (AudioStreamRangedDescription*)ma_malloc(dataSize, &pContext->allocationCallbacks); if (pDescriptions == NULL) { return MA_OUT_OF_MEMORY; } status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pDescriptions); if (status != noErr) { - ma_free(pDescriptions); + ma_free(pDescriptions, &pContext->allocationCallbacks); return ma_result_from_OSStatus(status); } @@ -18425,15 +18941,15 @@ ma_result ma_get_AudioObject_stream_descriptions(ma_context* pContext, AudioObje } -ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */ +static ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, AudioChannelLayout** ppChannelLayout) /* NOTE: Free the returned pointer with ma_free(). */ { AudioObjectPropertyAddress propAddress; UInt32 dataSize; OSStatus status; AudioChannelLayout* pChannelLayout; - ma_assert(pContext != NULL); - ma_assert(ppChannelLayout != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(ppChannelLayout != NULL); *ppChannelLayout = NULL; /* Safety. */ @@ -18446,14 +18962,14 @@ ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID return ma_result_from_OSStatus(status); } - pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize); + pChannelLayout = (AudioChannelLayout*)ma_malloc(dataSize, &pContext->allocationCallbacks); if (pChannelLayout == NULL) { return MA_OUT_OF_MEMORY; } status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pChannelLayout); if (status != noErr) { - ma_free(pChannelLayout); + ma_free(pChannelLayout, &pContext->allocationCallbacks); return ma_result_from_OSStatus(status); } @@ -18461,13 +18977,13 @@ ma_result ma_get_AudioObject_channel_layout(ma_context* pContext, AudioObjectID return MA_SUCCESS; } -ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount) +static ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pChannelCount) { AudioChannelLayout* pChannelLayout; ma_result result; - ma_assert(pContext != NULL); - ma_assert(pChannelCount != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pChannelCount != NULL); *pChannelCount = 0; /* Safety. 
*/ @@ -18484,16 +19000,17 @@ ma_result ma_get_AudioObject_channel_count(ma_context* pContext, AudioObjectID d *pChannelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); } - ma_free(pChannelLayout); + ma_free(pChannelLayout, &pContext->allocationCallbacks); return MA_SUCCESS; } -ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS]) +#if 0 +static ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS]) { AudioChannelLayout* pChannelLayout; ma_result result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); result = ma_get_AudioObject_channel_layout(pContext, deviceObjectID, deviceType, &pChannelLayout); if (result != MA_SUCCESS) { @@ -18502,24 +19019,25 @@ ma_result ma_get_AudioObject_channel_map(ma_context* pContext, AudioObjectID dev result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap); if (result != MA_SUCCESS) { - ma_free(pChannelLayout); + ma_free(pChannelLayout, &pContext->allocationCallbacks); return result; } - ma_free(pChannelLayout); + ma_free(pChannelLayout, &pContext->allocationCallbacks); return result; } +#endif -ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */ +static ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, UInt32* pSampleRateRangesCount, AudioValueRange** ppSampleRateRanges) /* NOTE: Free the returned pointer with ma_free(). */ { AudioObjectPropertyAddress propAddress; UInt32 dataSize; OSStatus status; AudioValueRange* pSampleRateRanges; - ma_assert(pContext != NULL); - ma_assert(pSampleRateRangesCount != NULL); - ma_assert(ppSampleRateRanges != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateRangesCount != NULL); + MA_ASSERT(ppSampleRateRanges != NULL); /* Safety. 
*/ *pSampleRateRangesCount = 0; @@ -18534,14 +19052,14 @@ ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID de return ma_result_from_OSStatus(status); } - pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize); + pSampleRateRanges = (AudioValueRange*)ma_malloc(dataSize, &pContext->allocationCallbacks); if (pSampleRateRanges == NULL) { return MA_OUT_OF_MEMORY; } status = ((ma_AudioObjectGetPropertyData_proc)pContext->coreaudio.AudioObjectGetPropertyData)(deviceObjectID, &propAddress, 0, NULL, &dataSize, pSampleRateRanges); if (status != noErr) { - ma_free(pSampleRateRanges); + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); return ma_result_from_OSStatus(status); } @@ -18550,14 +19068,15 @@ ma_result ma_get_AudioObject_sample_rates(ma_context* pContext, AudioObjectID de return MA_SUCCESS; } -ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut) +#if 0 +static ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 sampleRateIn, ma_uint32* pSampleRateOut) { UInt32 sampleRateRangeCount; AudioValueRange* pSampleRateRanges; ma_result result; - ma_assert(pContext != NULL); - ma_assert(pSampleRateOut != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pSampleRateOut != NULL); *pSampleRateOut = 0; /* Safety. */ @@ -18567,7 +19086,7 @@ ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, Audio } if (sampleRateRangeCount == 0) { - ma_free(pSampleRateRanges); + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); return MA_ERROR; /* Should never hit this case should we? */ } @@ -18581,7 +19100,7 @@ ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, Audio AudioValueRange caSampleRate = pSampleRateRanges[iCASampleRate]; if (caSampleRate.mMinimum <= malSampleRate && caSampleRate.mMaximum >= malSampleRate) { *pSampleRateOut = malSampleRate; - ma_free(pSampleRateRanges); + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); return MA_SUCCESS; } } @@ -18591,10 +19110,10 @@ ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, Audio If we get here it means none of miniaudio's standard sample rates matched any of the supported sample rates from the device. In this case we just fall back to the first one reported by Core Audio. */ - ma_assert(sampleRateRangeCount > 0); + MA_ASSERT(sampleRateRangeCount > 0); *pSampleRateOut = pSampleRateRanges[0].mMinimum; - ma_free(pSampleRateRanges); + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); return MA_SUCCESS; } else { /* Find the closest match to this sample rate. 
*/ @@ -18604,7 +19123,7 @@ ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, Audio for (iRange = 0; iRange < sampleRateRangeCount; ++iRange) { if (pSampleRateRanges[iRange].mMinimum <= sampleRateIn && pSampleRateRanges[iRange].mMaximum >= sampleRateIn) { *pSampleRateOut = sampleRateIn; - ma_free(pSampleRateRanges); + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); return MA_SUCCESS; } else { UInt32 absoluteDifference; @@ -18621,28 +19140,28 @@ ma_result ma_get_AudioObject_get_closest_sample_rate(ma_context* pContext, Audio } } - ma_assert(iCurrentClosestRange != (UInt32)-1); + MA_ASSERT(iCurrentClosestRange != (UInt32)-1); *pSampleRateOut = pSampleRateRanges[iCurrentClosestRange].mMinimum; - ma_free(pSampleRateRanges); + ma_free(pSampleRateRanges, &pContext->allocationCallbacks); return MA_SUCCESS; } /* Should never get here, but it would mean we weren't able to find any suitable sample rates. */ - /*ma_free(pSampleRateRanges);*/ + /*ma_free(pSampleRateRanges, &pContext->allocationCallbacks);*/ /*return MA_ERROR;*/ } +#endif - -ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut) +static ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32 bufferSizeInFramesIn, ma_uint32* pBufferSizeInFramesOut) { AudioObjectPropertyAddress propAddress; AudioValueRange bufferSizeRange; UInt32 dataSize; OSStatus status; - ma_assert(pContext != NULL); - ma_assert(pBufferSizeInFramesOut != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pBufferSizeInFramesOut != NULL); *pBufferSizeInFramesOut = 0; /* Safety. */ @@ -18668,7 +19187,7 @@ ma_result ma_get_AudioObject_closest_buffer_size_in_frames(ma_context* pContext, return MA_SUCCESS; } -ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pBufferSizeInOut) +static ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_uint32* pBufferSizeInOut) { ma_result result; ma_uint32 chosenBufferSizeInFrames; @@ -18676,7 +19195,7 @@ ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioOb UInt32 dataSize; OSStatus status; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); result = ma_get_AudioObject_closest_buffer_size_in_frames(pContext, deviceObjectID, deviceType, *pBufferSizeInOut, &chosenBufferSizeInFrames); if (result != MA_SUCCESS) { @@ -18702,10 +19221,10 @@ ma_result ma_set_AudioObject_buffer_size_in_frames(ma_context* pContext, AudioOb } -ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID) +static ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, AudioObjectID* pDeviceObjectID) { - ma_assert(pContext != NULL); - ma_assert(pDeviceObjectID != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pDeviceObjectID != NULL); /* Safety. 
*/ *pDeviceObjectID = 0; @@ -18755,7 +19274,7 @@ ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, if (ma_does_AudioObject_support_playback(pContext, deviceObjectID)) { if (strcmp(uid, pDeviceID->coreaudio) == 0) { *pDeviceObjectID = deviceObjectID; - ma_free(pDeviceObjectIDs); + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); return MA_SUCCESS; } } @@ -18763,14 +19282,14 @@ ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, if (ma_does_AudioObject_support_capture(pContext, deviceObjectID)) { if (strcmp(uid, pDeviceID->coreaudio) == 0) { *pDeviceObjectID = deviceObjectID; - ma_free(pDeviceObjectIDs); + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); return MA_SUCCESS; } } } } - ma_free(pDeviceObjectIDs); + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); } /* If we get here it means we couldn't find the device. */ @@ -18778,7 +19297,7 @@ ma_result ma_find_AudioObjectID(ma_context* pContext, ma_device_type deviceType, } -ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_bool32 usingDefaultFormat, ma_bool32 usingDefaultChannels, ma_bool32 usingDefaultSampleRate, AudioStreamBasicDescription* pFormat) +static ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID deviceObjectID, ma_device_type deviceType, ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_bool32 usingDefaultFormat, ma_bool32 usingDefaultChannels, ma_bool32 usingDefaultSampleRate, AudioStreamBasicDescription* pFormat) { UInt32 deviceFormatDescriptionCount; AudioStreamRangedDescription* pDeviceFormatDescriptions; @@ -18837,7 +19356,7 @@ ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID dev If we get here it means we don't have an exact match to what the client is asking for. We'll need to find the closest one. The next loop will check for formats that have the same sample rate to what we're asking for. If there is, we prefer that one in all cases. 
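As a concrete illustration (hypothetical device, not taken from the patch): if the client asks for f32/stereo/48000 and the device only exposes s16/stereo/48000 and f32/stereo/44100, the s16/stereo/48000 candidate wins because it matches the requested sample rate, even though the other candidate matches the requested sample format.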
*/ - ma_zero_object(&bestDeviceFormatSoFar); + MA_ZERO_OBJECT(&bestDeviceFormatSoFar); hasSupportedFormat = MA_FALSE; for (iFormat = 0; iFormat < deviceFormatDescriptionCount; ++iFormat) { @@ -18851,7 +19370,7 @@ ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID dev } if (!hasSupportedFormat) { - ma_free(pDeviceFormatDescriptions); + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); return MA_FORMAT_NOT_SUPPORTED; } @@ -18974,12 +19493,12 @@ ma_result ma_find_best_format__coreaudio(ma_context* pContext, AudioObjectID dev *pFormat = bestDeviceFormatSoFar; - ma_free(pDeviceFormatDescriptions); + ma_free(pDeviceFormatDescriptions, &pContext->allocationCallbacks); return MA_SUCCESS; } #endif -ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS]) +static ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit, ma_device_type deviceType, ma_channel channelMap[MA_MAX_CHANNELS]) { AudioUnitScope deviceScope; AudioUnitElement deviceBus; @@ -18988,7 +19507,7 @@ ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit AudioChannelLayout* pChannelLayout; ma_result result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (deviceType == ma_device_type_playback) { deviceScope = kAudioUnitScope_Output; @@ -19003,38 +19522,38 @@ ma_result ma_get_AudioUnit_channel_map(ma_context* pContext, AudioUnit audioUnit return ma_result_from_OSStatus(status); } - pChannelLayout = (AudioChannelLayout*)ma_malloc(channelLayoutSize); + pChannelLayout = (AudioChannelLayout*)ma__malloc_from_callbacks(channelLayoutSize, &pContext->allocationCallbacks); if (pChannelLayout == NULL) { return MA_OUT_OF_MEMORY; } status = ((ma_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize); if (status != noErr) { - ma_free(pChannelLayout); + ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks); return ma_result_from_OSStatus(status); } result = ma_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap); if (result != MA_SUCCESS) { - ma_free(pChannelLayout); + ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks); return result; } - ma_free(pChannelLayout); + ma__free_from_callbacks(pChannelLayout, &pContext->allocationCallbacks); return MA_SUCCESS; } -ma_bool32 ma_context_is_device_id_equal__coreaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__coreaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return strcmp(pID0->coreaudio, pID1->coreaudio) == 0; } -ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { #if defined(MA_APPLE_DESKTOP) UInt32 deviceCount; @@ -19051,7 +19570,7 @@ ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_ AudioObjectID deviceObjectID = pDeviceObjectIDs[iDevice]; ma_device_info info; - ma_zero_object(&info); + 
MA_ZERO_OBJECT(&info); if (ma_get_AudioObject_uid(pContext, deviceObjectID, sizeof(info.id.coreaudio), info.id.coreaudio) != MA_SUCCESS) { continue; } @@ -19071,18 +19590,18 @@ ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_ } } - ma_free(pDeviceObjectIDs); + ma_free(pDeviceObjectIDs, &pContext->allocationCallbacks); #else /* Only supporting default devices on non-Desktop platforms. */ ma_device_info info; - ma_zero_object(&info); + MA_ZERO_OBJECT(&info); ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); if (!callback(pContext, ma_device_type_playback, &info, pUserData)) { return MA_SUCCESS; } - ma_zero_object(&info); + MA_ZERO_OBJECT(&info); ma_strncpy_s(info.name, sizeof(info.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); if (!callback(pContext, ma_device_type_capture, &info, pUserData)) { return MA_SUCCESS; @@ -19092,11 +19611,11 @@ ma_result ma_context_enumerate_devices__coreaudio(ma_context* pContext, ma_enum_ return MA_SUCCESS; } -ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_result result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); /* No exclusive mode with the Core Audio backend for now. */ if (shareMode == ma_share_mode_exclusive) { @@ -19144,7 +19663,7 @@ ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_ continue; } - ma_assert(format != ma_format_unknown); + MA_ASSERT(format != ma_format_unknown); /* Make sure the format isn't already in the output list. */ for (iOutputFormat = 0; iOutputFormat < pDeviceInfo->formatCount; ++iOutputFormat) { @@ -19159,7 +19678,7 @@ ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_ } } - ma_free(pStreamDescriptions); + ma_free(pStreamDescriptions, &pContext->allocationCallbacks); /* Channels. 
*/ @@ -19258,7 +19777,7 @@ ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_ */ @autoreleasepool { AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; - ma_assert(pAudioSession != NULL); + MA_ASSERT(pAudioSession != NULL); pDeviceInfo->minSampleRate = (ma_uint32)pAudioSession.sampleRate; pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; @@ -19271,12 +19790,12 @@ ma_result ma_context_get_device_info__coreaudio(ma_context* pContext, ma_device_ } -OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList) +static OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pBufferList) { ma_device* pDevice = (ma_device*)pUserData; ma_stream_layout layout; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); #if defined(MA_DEBUG_OUTPUT) printf("INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pBufferList->mNumberBuffers); @@ -19311,7 +19830,7 @@ OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pA not interleaved, in which case we can't handle right now since miniaudio does not yet support non-interleaved streams. We just output silence here. */ - ma_zero_memory(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); + MA_ZERO_MEMORY(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); #if defined(MA_DEBUG_OUTPUT) printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); @@ -19320,33 +19839,41 @@ OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pA } } else { /* This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This assumes each buffer is the same size. */ - ma_uint8 tempBuffer[4096]; - UInt32 iBuffer; - for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) { - ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat); - ma_uint32 framesRemaining = frameCountPerBuffer; + + /* + For safety we'll check that the buffer count is a multiple of the internal channel count. If it's not, it means something + very strange has happened and we're not going to support it.
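+
+            As a rough illustration of what the per-channel split below amounts to, here is a minimal sketch for
+            hypothetical s16 data (illustration only, not the actual ma_deinterleave_pcm_frames() implementation):
+
+                for (iFrame = 0; iFrame < frameCount; iFrame += 1) {
+                    for (iChannel = 0; iChannel < channels; iChannel += 1) {
+                        // Each ppChannelBuffers[iChannel] receives a contiguous run of samples for that channel.
+                        ((ma_int16*)ppChannelBuffers[iChannel])[iFrame] = ((const ma_int16*)pInterleaved)[iFrame*channels + iChannel];
+                    }
+                }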
+ */ + if ((pBufferList->mNumberBuffers % pDevice->playback.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + UInt32 iBuffer; + + for (iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->playback.internalChannels) { + ma_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / ma_get_bytes_per_sample(pDevice->playback.internalFormat); + ma_uint32 framesRemaining = frameCountPerBuffer; - while (framesRemaining > 0) { - void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; - ma_uint32 iChannel; - ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - if (framesToRead > framesRemaining) { - framesToRead = framesRemaining; - } - - if (pDevice->type == ma_device_type_duplex) { - ma_device__handle_duplex_callback_playback(pDevice, framesToRead, tempBuffer, &pDevice->coreaudio.duplexRB); - } else { - ma_device__read_frames_from_client(pDevice, framesToRead, tempBuffer); - } - - for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { - ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToRead = sizeof(tempBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + if (framesToRead > framesRemaining) { + framesToRead = framesRemaining; + } + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, framesToRead, tempBuffer, &pDevice->coreaudio.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, framesToRead, tempBuffer); + } + + for (iChannel = 0; iChannel < pDevice->playback.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pBufferList->mBuffers[iBuffer+iChannel].mData, (frameCountPerBuffer - framesRemaining) * ma_get_bytes_per_sample(pDevice->playback.internalFormat)); + } + + ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers); + + framesRemaining -= framesToRead; } - - ma_deinterleave_pcm_frames(pDevice->playback.internalFormat, pDevice->playback.internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers); - - framesRemaining -= framesToRead; } } } @@ -19358,17 +19885,17 @@ OSStatus ma_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pA return noErr; } -OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList) +static OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pActionFlags, const AudioTimeStamp* pTimeStamp, UInt32 busNumber, UInt32 frameCount, AudioBufferList* pUnusedBufferList) { ma_device* pDevice = (ma_device*)pUserData; AudioBufferList* pRenderedBufferList; ma_stream_layout layout; OSStatus status; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; - ma_assert(pRenderedBufferList); + MA_ASSERT(pRenderedBufferList); /* We need to check whether or not we are outputting interleaved or non-interleaved samples. The way we do this is slightly different for each type. 
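For background: Core Audio delivers interleaved data as a single AudioBuffer whose mNumberChannels equals the stream's channel count, whereas non-interleaved (planar) data arrives as one AudioBuffer per channel, each with mNumberChannels set to 1.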
*/ layout = ma_stream_layout_interleaved; @@ -19408,7 +19935,7 @@ OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pAc ma_uint8 silentBuffer[4096]; ma_uint32 framesRemaining; - ma_zero_memory(silentBuffer, sizeof(silentBuffer)); + MA_ZERO_MEMORY(silentBuffer, sizeof(silentBuffer)); framesRemaining = frameCount; while (framesRemaining > 0) { @@ -19433,31 +19960,38 @@ OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pAc } } else { /* This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This assumes each buffer is the same size. */ - ma_uint8 tempBuffer[4096]; - UInt32 iBuffer; - for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) { - ma_uint32 framesRemaining = frameCount; - while (framesRemaining > 0) { - void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; - ma_uint32 iChannel; - ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_sample(pDevice->capture.internalFormat); - if (framesToSend > framesRemaining) { - framesToSend = framesRemaining; - } - - for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { - ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); - } - - ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer); + + /* + For safety we'll check that the buffer count is a multiple of the internal channel count. If it's not, it means something + very strange has happened and we're not going to support it. + */ + if ((pRenderedBufferList->mNumberBuffers % pDevice->capture.internalChannels) == 0) { + ma_uint8 tempBuffer[4096]; + UInt32 iBuffer; + for (iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->capture.internalChannels) { + ma_uint32 framesRemaining = frameCount; + while (framesRemaining > 0) { + void* ppDeinterleavedBuffers[MA_MAX_CHANNELS]; + ma_uint32 iChannel; + ma_uint32 framesToSend = sizeof(tempBuffer) / ma_get_bytes_per_sample(pDevice->capture.internalFormat); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + for (iChannel = 0; iChannel < pDevice->capture.internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)ma_offset_ptr(pRenderedBufferList->mBuffers[iBuffer+iChannel].mData, (frameCount - framesRemaining) * ma_get_bytes_per_sample(pDevice->capture.internalFormat)); + } + + ma_interleave_pcm_frames(pDevice->capture.internalFormat, pDevice->capture.internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer); - if (pDevice->type == ma_device_type_duplex) { - ma_device__handle_duplex_callback_capture(pDevice, framesToSend, tempBuffer, &pDevice->coreaudio.duplexRB); - } else { - ma_device__send_frames_to_client(pDevice, framesToSend, tempBuffer); - } + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, framesToSend, tempBuffer, &pDevice->coreaudio.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, framesToSend, tempBuffer); + } - framesRemaining -= framesToSend; + framesRemaining -= framesToSend; + } } } } @@ -19471,10 +20005,10 @@ OSStatus ma_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pAc return noErr; } -void on_start_stop__coreaudio(void* pUserData, AudioUnit
audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element) +static void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPropertyID propertyID, AudioUnitScope scope, AudioUnitElement element) { ma_device* pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* There's been a report of a deadlock here when triggered by ma_device_uninit(). It looks like @@ -19549,7 +20083,7 @@ static ma_device** g_ppTrackedDevices_CoreAudio = NULL; static ma_uint32 g_TrackedDeviceCap_CoreAudio = 0; static ma_uint32 g_TrackedDeviceCount_CoreAudio = 0; -OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData) +static OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 addressCount, const AudioObjectPropertyAddress* pAddresses, void* pUserData) { ma_device_type deviceType; @@ -19621,7 +20155,7 @@ OSStatus ma_default_device_changed__coreaudio(AudioObjectID objectID, UInt32 add static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContext) { - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (ma_atomic_increment_32(&g_DeviceTrackingInitCounter_CoreAudio) == 1) { AudioObjectPropertyAddress propAddress; @@ -19642,7 +20176,7 @@ static ma_result ma_context__init_device_tracking__coreaudio(ma_context* pContex static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pContext) { - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); if (ma_atomic_decrement_32(&g_DeviceTrackingInitCounter_CoreAudio) == 0) { AudioObjectPropertyAddress propAddress; @@ -19656,8 +20190,8 @@ static ma_result ma_context__uninit_device_tracking__coreaudio(ma_context* pCont ((ma_AudioObjectRemovePropertyListener_proc)pContext->coreaudio.AudioObjectRemovePropertyListener)(kAudioObjectSystemObject, &propAddress, &ma_default_device_changed__coreaudio, NULL); /* At this point there should be no tracked devices. If so there's an error somewhere. */ - ma_assert(g_ppTrackedDevices_CoreAudio == NULL); - ma_assert(g_TrackedDeviceCount_CoreAudio == 0); + MA_ASSERT(g_ppTrackedDevices_CoreAudio == NULL); + MA_ASSERT(g_TrackedDeviceCount_CoreAudio == 0); ma_mutex_uninit(&g_DeviceTrackingMutex_CoreAudio); } @@ -19669,7 +20203,7 @@ static ma_result ma_device__track__coreaudio(ma_device* pDevice) { ma_result result; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); result = ma_context__init_device_tracking__coreaudio(pDevice->pContext); if (result != MA_SUCCESS) { @@ -19680,15 +20214,17 @@ static ma_result ma_device__track__coreaudio(ma_device* pDevice) { /* Allocate memory if required. 
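The tracking array below grows by doubling. Note that ma__realloc_from_callbacks() also takes the old capacity, presumably so that an emulated realloc (allocate, copy, free) knows how many bytes to copy when the user supplies only onMalloc/onFree. A minimal sketch of such an emulation, with hypothetical variable names (illustration only, not the actual implementation):

        void* pNew = onMalloc(szNew, pUserData);
        if (pNew != NULL) {
            memcpy(pNew, pOld, szOld);  // szOld is the reason the old capacity has to be tracked.
            onFree(pOld, pUserData);
        }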
*/ if (g_TrackedDeviceCap_CoreAudio <= g_TrackedDeviceCount_CoreAudio) { + ma_uint32 oldCap; ma_uint32 newCap; ma_device** ppNewDevices; + oldCap = g_TrackedDeviceCap_CoreAudio; newCap = g_TrackedDeviceCap_CoreAudio * 2; if (newCap == 0) { newCap = 1; } - ppNewDevices = (ma_device**)ma_realloc(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio) * newCap); + ppNewDevices = (ma_device**)ma__realloc_from_callbacks(g_ppTrackedDevices_CoreAudio, sizeof(*g_ppTrackedDevices_CoreAudio)*newCap, sizeof(*g_ppTrackedDevices_CoreAudio)*oldCap, &pDevice->pContext->allocationCallbacks); if (ppNewDevices == NULL) { ma_mutex_unlock(&g_DeviceTrackingMutex_CoreAudio); return MA_OUT_OF_MEMORY; @@ -19710,7 +20246,7 @@ static ma_result ma_device__untrack__coreaudio(ma_device* pDevice) { ma_result result; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); ma_mutex_lock(&g_DeviceTrackingMutex_CoreAudio); { @@ -19727,7 +20263,7 @@ static ma_result ma_device__untrack__coreaudio(ma_device* pDevice) /* If there's nothing else in the list we need to free memory. */ if (g_TrackedDeviceCount_CoreAudio == 0) { - ma_free(g_ppTrackedDevices_CoreAudio); + ma__free_from_callbacks(g_ppTrackedDevices_CoreAudio, &pDevice->pContext->allocationCallbacks); g_ppTrackedDevices_CoreAudio = NULL; g_TrackedDeviceCap_CoreAudio = 0; } @@ -19747,10 +20283,109 @@ static ma_result ma_device__untrack__coreaudio(ma_device* pDevice) } #endif -void ma_device_uninit__coreaudio(ma_device* pDevice) +#if defined(MA_APPLE_MOBILE) +@interface ma_router_change_handler:NSObject { + ma_device* m_pDevice; +} +@end + +@implementation ma_router_change_handler +-(id)init:(ma_device*)pDevice +{ + self = [super init]; + m_pDevice = pDevice; + + [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(handle_route_change:) name:AVAudioSessionRouteChangeNotification object:[AVAudioSession sharedInstance]]; + + return self; +} + +-(void)dealloc +{ + [self remove_handler]; +} + +-(void)remove_handler +{ + [[NSNotificationCenter defaultCenter] removeObserver:self name:@"AVAudioSessionRouteChangeNotification" object:nil]; +} + +-(void)handle_route_change:(NSNotification*)pNotification +{ + AVAudioSession* pSession = [AVAudioSession sharedInstance]; + + NSInteger reason = [[[pNotification userInfo] objectForKey:AVAudioSessionRouteChangeReasonKey] integerValue]; + switch (reason) + { + case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOldDeviceUnavailable\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonNewDeviceAvailable: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNewDeviceAvailable\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonWakeFromSleep: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonWakeFromSleep\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonOverride: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonOverride\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonCategoryChange: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: 
AVAudioSessionRouteChangeReasonCategoryChange\n"); + #endif + } break; + + case AVAudioSessionRouteChangeReasonUnknown: + default: + { + #if defined(MA_DEBUG_OUTPUT) + printf("[Core Audio] Route Changed: AVAudioSessionRouteChangeReasonUnknown\n"); + #endif + } break; + } + + m_pDevice->sampleRate = (ma_uint32)pSession.sampleRate; + + if (m_pDevice->type == ma_device_type_capture || m_pDevice->type == ma_device_type_duplex) { + m_pDevice->capture.channels = (ma_uint32)pSession.inputNumberOfChannels; + ma_device__post_init_setup(m_pDevice, ma_device_type_capture); + } + if (m_pDevice->type == ma_device_type_playback || m_pDevice->type == ma_device_type_duplex) { + m_pDevice->playback.channels = (ma_uint32)pSession.outputNumberOfChannels; + ma_device__post_init_setup(m_pDevice, ma_device_type_playback); + } +} +@end +#endif + +static void ma_device_uninit__coreaudio(ma_device* pDevice) { - ma_assert(pDevice != NULL); - ma_assert(ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED); + MA_ASSERT(pDevice != NULL); + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED); #if defined(MA_APPLE_DESKTOP) /* @@ -19759,6 +20394,12 @@ void ma_device_uninit__coreaudio(ma_device* pDevice) */ ma_device__untrack__coreaudio(pDevice); #endif +#if defined(MA_APPLE_MOBILE) + if (pDevice->coreaudio.pRouteChangeHandler != NULL) { + ma_router_change_handler* pRouteChangeHandler = (__bridge_transfer ma_router_change_handler*)pDevice->coreaudio.pRouteChangeHandler; + [pRouteChangeHandler remove_handler]; + } +#endif if (pDevice->coreaudio.audioUnitCapture != NULL) { ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); @@ -19768,7 +20409,7 @@ void ma_device_uninit__coreaudio(ma_device* pDevice) } if (pDevice->coreaudio.pAudioBufferList) { - ma_free(pDevice->coreaudio.pAudioBufferList); + ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); } if (pDevice->type == ma_device_type_duplex) { @@ -19809,7 +20450,7 @@ typedef struct char deviceName[256]; } ma_device_init_internal_data__coreaudio; -ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. */ +static ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_device_init_internal_data__coreaudio* pData, void* pDevice_DoNotReference) /* <-- pDevice is typed as void* intentionally so as to avoid accidentally referencing it. 
*/ { ma_result result; OSStatus status; @@ -19826,8 +20467,8 @@ ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_typ return MA_INVALID_ARGS; } - ma_assert(pContext != NULL); - ma_assert(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture); + MA_ASSERT(pContext != NULL); + MA_ASSERT(deviceType == ma_device_type_playback || deviceType == ma_device_type_capture); #if defined(MA_APPLE_DESKTOP) pData->deviceObjectID = 0; @@ -19953,10 +20594,21 @@ ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_typ */ @autoreleasepool { AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; - ma_assert(pAudioSession != NULL); + MA_ASSERT(pAudioSession != NULL); [pAudioSession setPreferredSampleRate:(double)pData->sampleRateIn error:nil]; bestFormat.mSampleRate = pAudioSession.sampleRate; + + /* + I've had a report that the channel count returned by AudioUnitGetProperty above is inconsistent with + AVAudioSession outputNumberOfChannels. I'm going to try using the AVAudioSession values instead. + */ + if (deviceType == ma_device_type_playback) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.outputNumberOfChannels; + } + if (deviceType == ma_device_type_capture) { + bestFormat.mChannelsPerFrame = (UInt32)pAudioSession.inputNumberOfChannels; + } } status = ((ma_AudioUnitSetProperty_proc)pContext->coreaudio.AudioUnitSetProperty)(pData->audioUnit, kAudioUnitProperty_StreamFormat, formatScope, formatElement, &bestFormat, sizeof(bestFormat)); @@ -20071,7 +20723,7 @@ ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_typ allocationSize += actualBufferSizeInFrames * ma_get_bytes_per_sample(pData->formatOut) * pData->channelsOut; } - pBufferList = (AudioBufferList*)ma_malloc(allocationSize); + pBufferList = (AudioBufferList*)ma__malloc_from_callbacks(allocationSize, &pContext->allocationCallbacks); if (pBufferList == NULL) { ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); return MA_OUT_OF_MEMORY; @@ -20125,7 +20777,7 @@ ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_typ /* Initialize the audio unit. 
*/ status = ((ma_AudioUnitInitialize_proc)pContext->coreaudio.AudioUnitInitialize)(pData->audioUnit); if (status != noErr) { - ma_free(pData->pAudioBufferList); + ma__free_from_callbacks(pData->pAudioBufferList, &pContext->allocationCallbacks); pData->pAudioBufferList = NULL; ((ma_AudioComponentInstanceDispose_proc)pContext->coreaudio.AudioComponentInstanceDispose)(pData->audioUnit); return ma_result_from_OSStatus(status); @@ -20145,7 +20797,7 @@ ma_result ma_device_init_internal__coreaudio(ma_context* pContext, ma_device_typ return result; } -ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit) +static ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_type deviceType, ma_bool32 disposePreviousAudioUnit) { ma_device_init_internal_data__coreaudio data; ma_result result; @@ -20159,7 +20811,7 @@ ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_typ data.formatIn = pDevice->capture.format; data.channelsIn = pDevice->capture.channels; data.sampleRateIn = pDevice->sampleRate; - ma_copy_memory(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pDevice->capture.channelMap, sizeof(pDevice->capture.channelMap)); data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; @@ -20172,13 +20824,13 @@ ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_typ ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); } if (pDevice->coreaudio.pAudioBufferList) { - ma_free(pDevice->coreaudio.pAudioBufferList); + ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); } } else if (deviceType == ma_device_type_playback) { data.formatIn = pDevice->playback.format; data.channelsIn = pDevice->playback.channels; data.sampleRateIn = pDevice->sampleRate; - ma_copy_memory(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pDevice->playback.channelMap, sizeof(pDevice->playback.channelMap)); data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; @@ -20215,7 +20867,7 @@ ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_typ pDevice->capture.internalFormat = data.formatOut; pDevice->capture.internalChannels = data.channelsOut; pDevice->capture.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->capture.internalPeriods = data.periodsOut; } else if (deviceType == ma_device_type_playback) { @@ -20227,7 +20879,7 @@ ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_typ pDevice->playback.internalFormat = data.formatOut; pDevice->playback.internalChannels = data.channelsOut; pDevice->playback.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, 
sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->playback.internalPeriods = data.periodsOut; } @@ -20236,13 +20888,13 @@ ma_result ma_device_reinit_internal__coreaudio(ma_device* pDevice, ma_device_typ } -ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result; - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -20260,7 +20912,7 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config data.formatIn = pConfig->capture.format; data.channelsIn = pConfig->capture.channels; data.sampleRateIn = pConfig->sampleRate; - ma_copy_memory(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pConfig->capture.channelMap, sizeof(pConfig->capture.channelMap)); data.usingDefaultFormat = pDevice->capture.usingDefaultFormat; data.usingDefaultChannels = pDevice->capture.usingDefaultChannels; data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; @@ -20291,7 +20943,7 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config pDevice->capture.internalFormat = data.formatOut; pDevice->capture.internalChannels = data.channelsOut; pDevice->capture.internalSampleRate = data.sampleRateOut; - ma_copy_memory(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->capture.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->capture.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->capture.internalPeriods = data.periodsOut; @@ -20312,7 +20964,7 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config data.formatIn = pConfig->playback.format; data.channelsIn = pConfig->playback.channels; data.sampleRateIn = pConfig->sampleRate; - ma_copy_memory(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap)); + MA_COPY_MEMORY(data.channelMapIn, pConfig->playback.channelMap, sizeof(pConfig->playback.channelMap)); data.usingDefaultFormat = pDevice->playback.usingDefaultFormat; data.usingDefaultChannels = pDevice->playback.usingDefaultChannels; data.usingDefaultSampleRate = pDevice->usingDefaultSampleRate; @@ -20336,7 +20988,7 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config if (pConfig->deviceType == ma_device_type_duplex) { ((ma_AudioComponentInstanceDispose_proc)pDevice->pContext->coreaudio.AudioComponentInstanceDispose)((AudioUnit)pDevice->coreaudio.audioUnitCapture); if (pDevice->coreaudio.pAudioBufferList) { - ma_free(pDevice->coreaudio.pAudioBufferList); + ma__free_from_callbacks(pDevice->coreaudio.pAudioBufferList, &pDevice->pContext->allocationCallbacks); } } return result; @@ -20351,7 +21003,7 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config pDevice->playback.internalFormat = data.formatOut; pDevice->playback.internalChannels = data.channelsOut; pDevice->playback.internalSampleRate = 
data.sampleRateOut; - ma_copy_memory(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); + MA_COPY_MEMORY(pDevice->playback.internalChannelMap, data.channelMapOut, sizeof(data.channelMapOut)); pDevice->playback.internalBufferSizeInFrames = data.bufferSizeInFramesOut; pDevice->playback.internalPeriods = data.periodsOut; @@ -20379,7 +21031,7 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config /* Need a ring buffer for duplex mode. */ if (pConfig->deviceType == ma_device_type_duplex) { ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames); - ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->coreaudio.duplexRB); + ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->coreaudio.duplexRB); if (result != MA_SUCCESS) { return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[Core Audio] Failed to initialize ring buffer.", result); } @@ -20390,19 +21042,27 @@ ma_result ma_device_init__coreaudio(ma_context* pContext, const ma_device_config void* pBufferData; ma_pcm_rb_acquire_write(&pDevice->coreaudio.duplexRB, &bufferSizeInFrames, &pBufferData); { - ma_zero_memory(pBufferData, bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + MA_ZERO_MEMORY(pBufferData, bufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); } ma_pcm_rb_commit_write(&pDevice->coreaudio.duplexRB, bufferSizeInFrames, pBufferData); } } + /* + We need to detect when a route has changed so we can update the data conversion pipeline accordingly. This is done + differently on non-Desktop Apple platforms. 
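+    On desktop this is covered by the default-device change listener (ma_default_device_changed__coreaudio(), registered via the
+    device tracking in ma_device__track__coreaudio()); on mobile the ma_router_change_handler observer defined above listens for
+    AVAudioSessionRouteChangeNotification and re-runs ma_device__post_init_setup() with the session's current channel counts.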
+ */ +#if defined(MA_APPLE_MOBILE) + pDevice->coreaudio.pRouteChangeHandler = (__bridge_retained void*)[[ma_router_change_handler alloc] init:pDevice]; +#endif + return MA_SUCCESS; } -ma_result ma_device_start__coreaudio(ma_device* pDevice) +static ma_result ma_device_start__coreaudio(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { OSStatus status = ((ma_AudioOutputUnitStart_proc)pDevice->pContext->coreaudio.AudioOutputUnitStart)((AudioUnit)pDevice->coreaudio.audioUnitCapture); @@ -20424,9 +21084,9 @@ ma_result ma_device_start__coreaudio(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_stop__coreaudio(ma_device* pDevice) +static ma_result ma_device_stop__coreaudio(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { OSStatus status = ((ma_AudioOutputUnitStop_proc)pDevice->pContext->coreaudio.AudioOutputUnitStop)((AudioUnit)pDevice->coreaudio.audioUnitCapture); @@ -20448,10 +21108,10 @@ ma_result ma_device_stop__coreaudio(ma_device* pDevice) } -ma_result ma_context_uninit__coreaudio(ma_context* pContext) +static ma_result ma_context_uninit__coreaudio(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_coreaudio); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_coreaudio); #if !defined(MA_NO_RUNTIME_LINKING) && !defined(MA_APPLE_MOBILE) ma_dlclose(pContext, pContext->coreaudio.hAudioUnit); @@ -20463,23 +21123,63 @@ ma_result ma_context_uninit__coreaudio(ma_context* pContext) return MA_SUCCESS; } -ma_result ma_context_init__coreaudio(const ma_context_config* pConfig, ma_context* pContext) +#if defined(MA_APPLE_MOBILE) +static AVAudioSessionCategory ma_to_AVAudioSessionCategory(ma_ios_session_category category) +{ + /* The "default" and "none" categories are treated different and should not be used as an input into this function. */ + MA_ASSERT(category != ma_ios_session_category_default); + MA_ASSERT(category != ma_ios_session_category_none); + + switch (category) { + case ma_ios_session_category_ambient: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_solo_ambient: return AVAudioSessionCategorySoloAmbient; + case ma_ios_session_category_playback: return AVAudioSessionCategoryPlayback; + case ma_ios_session_category_record: return AVAudioSessionCategoryRecord; + case ma_ios_session_category_play_and_record: return AVAudioSessionCategoryPlayAndRecord; + case ma_ios_session_category_multi_route: return AVAudioSessionCategoryMultiRoute; + case ma_ios_session_category_none: return AVAudioSessionCategoryAmbient; + case ma_ios_session_category_default: return AVAudioSessionCategoryAmbient; + default: return AVAudioSessionCategoryAmbient; + } +} +#endif + +static ma_result ma_context_init__coreaudio(const ma_context_config* pConfig, ma_context* pContext) { - ma_assert(pContext != NULL); - - (void)pConfig; + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pContext != NULL); #if defined(MA_APPLE_MOBILE) @autoreleasepool { AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; - ma_assert(pAudioSession != NULL); + AVAudioSessionCategoryOptions options = pConfig->coreaudio.sessionCategoryOptions; - [pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord error:nil]; - - /* By default we want miniaudio to use the speakers instead of the receiver. 
In the future this may be customizable. */ - ma_bool32 useSpeakers = MA_TRUE; - if (useSpeakers) { - [pAudioSession overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:nil]; + MA_ASSERT(pAudioSession != NULL); + + if (pConfig->coreaudio.sessionCategory == ma_ios_session_category_default) { + /* + I'm going to use trial and error to determine our default session category. First we'll try PlayAndRecord. If that fails + we'll try Playback and if that fails we'll try record. If all of these fail we'll just not set the category. + */ + #if !defined(MA_APPLE_TV) && !defined(MA_APPLE_WATCH) + options |= AVAudioSessionCategoryOptionDefaultToSpeaker; + #endif + + if ([pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord withOptions:options error:nil]) { + /* Using PlayAndRecord */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryPlayback withOptions:options error:nil]) { + /* Using Playback */ + } else if ([pAudioSession setCategory: AVAudioSessionCategoryRecord withOptions:options error:nil]) { + /* Using Record */ + } else { + /* Leave as default? */ + } + } else { + if (pConfig->coreaudio.sessionCategory != ma_ios_session_category_none) { + if (![pAudioSession setCategory: ma_to_AVAudioSessionCategory(pConfig->coreaudio.sessionCategory) withOptions:options error:nil]) { + return MA_INVALID_OPERATION; /* Failed to set session category. */ + } + } } } #endif @@ -20490,8 +21190,8 @@ ma_result ma_context_init__coreaudio(const ma_context_config* pConfig, ma_contex return MA_API_NOT_FOUND; } - pContext->coreaudio.CFStringGetCString = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFStringGetCString"); - pContext->coreaudio.CFRelease = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFRelease"); + pContext->coreaudio.CFStringGetCString = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFStringGetCString"); + pContext->coreaudio.CFRelease = ma_dlsym(pContext, pContext->coreaudio.hCoreFoundation, "CFRelease"); pContext->coreaudio.hCoreAudio = ma_dlopen(pContext, "CoreAudio.framework/CoreAudio"); @@ -20698,7 +21398,19 @@ typedef int (* ma_sio_start_proc) (struct ma_sio_hdl*); typedef int (* ma_sio_stop_proc) (struct ma_sio_hdl*); typedef int (* ma_sio_initpar_proc)(struct ma_sio_par*); -ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb) +static ma_uint32 ma_get_standard_sample_rate_priority_index__sndio(ma_uint32 sampleRate) /* Lower = higher priority */ +{ + ma_uint32 i; + for (i = 0; i < ma_countof(g_maStandardSampleRatePriorities); ++i) { + if (g_maStandardSampleRatePriorities[i] == sampleRate) { + return i; + } + } + + return (ma_uint32)-1; +} + +static ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, unsigned int sig, unsigned int le, unsigned int msb) { /* We only support native-endian right now. 
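For example, on a little-endian host an encoding reported with le == 0 (big-endian samples) is treated as unsupported by the check below and the caller simply moves on to the next configuration; a native-endian encoding with bits == 16, bps == 2 and sig == 1 would be expected to map to ma_format_s16.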
*/ if ((ma_is_little_endian() && le == 0) || (ma_is_big_endian() && le == 1)) { @@ -20724,12 +21436,12 @@ ma_format ma_format_from_sio_enc__sndio(unsigned int bits, unsigned int bps, uns return ma_format_unknown; } -ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps) +static ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps) { ma_format bestFormat; unsigned int iConfig; - ma_assert(caps != NULL); + MA_ASSERT(caps != NULL); bestFormat = ma_format_unknown; for (iConfig = 0; iConfig < caps->nconf; iConfig += 1) { @@ -20769,13 +21481,13 @@ ma_format ma_find_best_format_from_sio_cap__sndio(struct ma_sio_cap* caps) return ma_format_unknown; } -ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat) +static ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat) { ma_uint32 maxChannels; unsigned int iConfig; - ma_assert(caps != NULL); - ma_assert(requiredFormat != ma_format_unknown); + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); /* Just pick whatever configuration has the most channels. */ maxChannels = 0; @@ -20836,16 +21548,16 @@ ma_uint32 ma_find_best_channels_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_ return maxChannels; } -ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels) +static ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, ma_device_type deviceType, ma_format requiredFormat, ma_uint32 requiredChannels) { ma_uint32 firstSampleRate; ma_uint32 bestSampleRate; unsigned int iConfig; - ma_assert(caps != NULL); - ma_assert(requiredFormat != ma_format_unknown); - ma_assert(requiredChannels > 0); - ma_assert(requiredChannels <= MA_MAX_CHANNELS); + MA_ASSERT(caps != NULL); + MA_ASSERT(requiredFormat != ma_format_unknown); + MA_ASSERT(requiredChannels > 0); + MA_ASSERT(requiredChannels <= MA_MAX_CHANNELS); firstSampleRate = 0; /* <-- If the device does not support a standard rate we'll fall back to the first one that's found. */ bestSampleRate = 0; @@ -20912,12 +21624,12 @@ ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, } /* Disregard this rate if it's not a standard one. */ - ratePriority = ma_get_standard_sample_rate_priority_index(rate); + ratePriority = ma_get_standard_sample_rate_priority_index__sndio(rate); if (ratePriority == (ma_uint32)-1) { continue; } - if (ma_get_standard_sample_rate_priority_index(bestSampleRate) > ratePriority) { /* Lower = better. */ + if (ma_get_standard_sample_rate_priority_index__sndio(bestSampleRate) > ratePriority) { /* Lower = better. 
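For example, if bestSampleRate is currently 22050 and rate is 48000, and 48000 appears earlier in g_maStandardSampleRatePriorities than 22050, then ratePriority is the smaller index and 48000 replaces 22050 as the best rate found so far.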
*/ bestSampleRate = rate; } } @@ -20934,23 +21646,23 @@ ma_uint32 ma_find_best_sample_rate_from_sio_cap__sndio(struct ma_sio_cap* caps, } -ma_bool32 ma_context_is_device_id_equal__sndio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__sndio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return ma_strcmp(pID0->sndio, pID1->sndio) == 0; } -ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_bool32 isTerminating = MA_FALSE; struct ma_sio_hdl* handle; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); /* sndio doesn't seem to have a good device enumeration API, so I'm therefore only enumerating over default devices for now. */ @@ -20960,7 +21672,7 @@ ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devi if (handle != NULL) { /* Supports playback. */ ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), MA_SIO_DEVANY); ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME); @@ -20976,7 +21688,7 @@ ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devi if (handle != NULL) { /* Supports capture. */ ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_strcpy_s(deviceInfo.id.sndio, sizeof(deviceInfo.id.sndio), "default"); ma_strcpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME); @@ -20989,14 +21701,14 @@ ma_result ma_context_enumerate_devices__sndio(ma_context* pContext, ma_enum_devi return MA_SUCCESS; } -ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { char devid[256]; struct ma_sio_hdl* handle; struct ma_sio_cap caps; unsigned int iConfig; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)shareMode; /* We need to open the device before we can get information about it. 
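(sndio exposes device capabilities only through an open handle, via sio_getcap() into the struct ma_sio_cap declared above, so the handle is opened temporarily, queried, and closed again.)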
*/ @@ -21110,9 +21822,9 @@ ma_result ma_context_get_device_info__sndio(ma_context* pContext, ma_device_type return MA_SUCCESS; } -void ma_device_uninit__sndio(ma_device* pDevice) +static void ma_device_uninit__sndio(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((ma_sio_close_proc)pDevice->pContext->sndio.sio_close)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); @@ -21123,7 +21835,7 @@ void ma_device_uninit__sndio(ma_device* pDevice) } } -ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +static ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) { const char* pDeviceName; ma_ptr handle; @@ -21140,10 +21852,10 @@ ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_con ma_uint32 internalBufferSizeInFrames; ma_uint32 internalPeriods; - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(deviceType != ma_device_type_duplex); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); if (deviceType == ma_device_type_capture) { openFlags = MA_SIO_REC; @@ -21307,11 +22019,11 @@ ma_result ma_device_init_handle__sndio(ma_context* pContext, const ma_device_con return MA_SUCCESS; } -ma_result ma_device_init__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__sndio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); - ma_zero_object(&pDevice->sndio); + MA_ZERO_OBJECT(&pDevice->sndio); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -21334,9 +22046,9 @@ ma_result ma_device_init__sndio(ma_context* pContext, const ma_device_config* pC return MA_SUCCESS; } -ma_result ma_device_stop__sndio(ma_device* pDevice) +static ma_result ma_device_stop__sndio(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ((ma_sio_stop_proc)pDevice->pContext->sndio.sio_stop)((struct ma_sio_hdl*)pDevice->sndio.handleCapture); @@ -21349,7 +22061,7 @@ ma_result ma_device_stop__sndio(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { int result; @@ -21369,7 +22081,7 @@ ma_result ma_device_write__sndio(ma_device* pDevice, const void* pPCMFrames, ma_ return MA_SUCCESS; } -ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) { int result; @@ -21389,7 +22101,7 @@ ma_result ma_device_read__sndio(ma_device* pDevice, void* pPCMFrames, ma_uint32 return MA_SUCCESS; } -ma_result ma_device_main_loop__sndio(ma_device* pDevice) +static ma_result ma_device_main_loop__sndio(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = 
MA_FALSE; @@ -21408,77 +22120,88 @@ ma_result ma_device_main_loop__sndio(ma_device* pDevice) case ma_device_type_duplex: { /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__sndio(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = ma_device_read__sndio(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - pDevice->capture._dspFrameCount = framesToProcess; - pDevice->capture._dspFrames = capturedDeviceData; + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; for (;;) { - ma_uint8 capturedData[8192]; - ma_uint8 playbackData[8192]; - ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames); - ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing); - if (capturedFramesToProcess == 0) { - break; /* Don't fire the data callback with 
zero frames. */ + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; } - ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess); + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */ - /* At this point the playbackData buffer should be holding data that needs to be written to the device. */ - pDevice->playback._dspFrameCount = capturedFramesToProcess; - pDevice->playback._dspFrames = playbackData; + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__sndio(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__sndio(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. 
*/ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__sndio()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } break; @@ -21549,16 +22272,16 @@ ma_result ma_device_main_loop__sndio(ma_device* pDevice) return result; } -ma_result ma_context_uninit__sndio(ma_context* pContext) +static ma_result ma_context_uninit__sndio(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_sndio); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_sndio); (void)pContext; return MA_SUCCESS; } -ma_result ma_context_init__sndio(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__sndio(const ma_context_config* pConfig, ma_context* pContext) { #ifndef MA_NO_RUNTIME_LINKING const char* libsndioNames[] = { @@ -21638,30 +22361,30 @@ audio(4) Backend #endif #endif -void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex) +static void ma_construct_device_id__audio4(char* id, size_t idSize, const char* base, int deviceIndex) { size_t baseLen; - ma_assert(id != NULL); - ma_assert(idSize > 0); - ma_assert(deviceIndex >= 0); + MA_ASSERT(id != NULL); + MA_ASSERT(idSize > 0); + MA_ASSERT(deviceIndex >= 0); baseLen = strlen(base); - ma_assert(idSize > baseLen); + MA_ASSERT(idSize > baseLen); ma_strcpy_s(id, idSize, base); ma_itoa_s(deviceIndex, id+baseLen, idSize-baseLen, 10); } -ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut) +static ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* base, int* pIndexOut) { size_t idLen; size_t baseLen; const char* deviceIndexStr; - ma_assert(id != NULL); - ma_assert(base != NULL); - ma_assert(pIndexOut != NULL); + MA_ASSERT(id != NULL); + MA_ASSERT(base != NULL); + MA_ASSERT(pIndexOut != NULL); idLen = strlen(id); baseLen = strlen(base); @@ -21685,18 +22408,18 @@ ma_result ma_extract_device_index_from_id__audio4(const char* id, const char* ba return MA_SUCCESS; } -ma_bool32 ma_context_is_device_id_equal__audio4(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__audio4(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return ma_strcmp(pID0->audio4, pID1->audio4) == 0; } #if !defined(MA_AUDIO4_USE_NEW_API) /* Old API */ -ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision) +static ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int precision) { if (precision == 8 && (encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR || encoding == AUDIO_ENCODING_ULINEAR_LE || encoding == AUDIO_ENCODING_ULINEAR_BE)) { return ma_format_u8; @@ -21723,11 +22446,11 @@ ma_format ma_format_from_encoding__audio4(unsigned int encoding, unsigned int pr return ma_format_unknown; /* Encoding not supported. 
*/ } -void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision) +static void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, unsigned int* pPrecision) { - ma_assert(format != ma_format_unknown); - ma_assert(pEncoding != NULL); - ma_assert(pPrecision != NULL); + MA_ASSERT(format != ma_format_unknown); + MA_ASSERT(pEncoding != NULL); + MA_ASSERT(pPrecision != NULL); switch (format) { @@ -21759,12 +22482,12 @@ void ma_encoding_from_format__audio4(ma_format format, unsigned int* pEncoding, } } -ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo) +static ma_format ma_format_from_prinfo__audio4(struct audio_prinfo* prinfo) { return ma_format_from_encoding__audio4(prinfo->encoding, prinfo->precision); } #else -ma_format ma_format_from_swpar__audio4(struct audio_swpar* par) +static ma_format ma_format_from_swpar__audio4(struct audio_swpar* par) { if (par->bits == 8 && par->bps == 1 && par->sig == 0) { return ma_format_u8; @@ -21784,7 +22507,7 @@ ma_format ma_format_from_swpar__audio4(struct audio_swpar* par) } #endif -ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pInfoOut) +static ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_device_type deviceType, int fd, ma_device_info* pInfoOut) { audio_device_t fdDevice; #if !defined(MA_AUDIO4_USE_NEW_API) @@ -21795,9 +22518,9 @@ ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_de ma_format format; #endif - ma_assert(pContext != NULL); - ma_assert(fd >= 0); - ma_assert(pInfoOut != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(fd >= 0); + MA_ASSERT(pInfoOut != NULL); (void)pContext; (void)deviceType; @@ -21815,7 +22538,7 @@ ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_de audio_encoding_t encoding; ma_format format; - ma_zero_object(&encoding); + MA_ZERO_OBJECT(&encoding); encoding.index = counter; if (ioctl(fd, AUDIO_GETENC, &encoding) < 0) { break; @@ -21870,14 +22593,14 @@ ma_result ma_context_get_device_info_from_fd__audio4(ma_context* pContext, ma_de return MA_SUCCESS; } -ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { const int maxDevices = 64; char devpath[256]; int iDevice; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); /* Every device will be named "/dev/audioN", with a "/dev/audioctlN" equivalent. We use the "/dev/audioctlN" @@ -21903,7 +22626,7 @@ ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_dev if (fd >= 0) { /* Supports playback. */ ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_playback, fd, &deviceInfo) == MA_SUCCESS) { isTerminating = !callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); @@ -21919,7 +22642,7 @@ ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_dev if (fd >= 0) { /* Supports capture. 
*/ ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); ma_construct_device_id__audio4(deviceInfo.id.audio4, sizeof(deviceInfo.id.audio4), "/dev/audio", iDevice); if (ma_context_get_device_info_from_fd__audio4(pContext, ma_device_type_capture, fd, &deviceInfo) == MA_SUCCESS) { isTerminating = !callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); @@ -21937,14 +22660,14 @@ ma_result ma_context_enumerate_devices__audio4(ma_context* pContext, ma_enum_dev return MA_SUCCESS; } -ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { int fd = -1; int deviceIndex = -1; char ctlid[256]; ma_result result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)shareMode; /* @@ -21981,9 +22704,9 @@ ma_result ma_context_get_device_info__audio4(ma_context* pContext, ma_device_typ return result; } -void ma_device_uninit__audio4(ma_device* pDevice) +static void ma_device_uninit__audio4(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { close(pDevice->audio4.fdCapture); @@ -21994,7 +22717,7 @@ void ma_device_uninit__audio4(ma_device* pDevice) } } -ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +static ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) { const char* pDefaultDeviceNames[] = { "/dev/audio", @@ -22013,10 +22736,10 @@ ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config ma_uint32 internalBufferSizeInFrames; ma_uint32 internalPeriods; - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(deviceType != ma_device_type_duplex); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); (void)pContext; @@ -22197,11 +22920,11 @@ ma_result ma_device_init_fd__audio4(ma_context* pContext, const ma_device_config return MA_SUCCESS; } -ma_result ma_device_init__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__audio4(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); - ma_zero_object(&pDevice->audio4); + MA_ZERO_OBJECT(&pDevice->audio4); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -22246,9 +22969,9 @@ ma_result ma_device_init__audio4(ma_context* pContext, const ma_device_config* p } #if 0 -ma_result ma_device_start__audio4(ma_device* pDevice) +static ma_result ma_device_start__audio4(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { if (pDevice->audio4.fdCapture == -1) { @@ -22266,7 +22989,7 @@ ma_result ma_device_start__audio4(ma_device* pDevice) } #endif -ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd) +static ma_result 
ma_device_stop_fd__audio4(ma_device* pDevice, int fd) { if (fd == -1) { return MA_INVALID_ARGS; @@ -22285,9 +23008,9 @@ ma_result ma_device_stop_fd__audio4(ma_device* pDevice, int fd) return MA_SUCCESS; } -ma_result ma_device_stop__audio4(ma_device* pDevice) +static ma_result ma_device_stop__audio4(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ma_result result = ma_device_stop_fd__audio4(pDevice, pDevice->audio4.fdCapture); @@ -22306,7 +23029,7 @@ ma_result ma_device_stop__audio4(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { int result; @@ -22326,7 +23049,7 @@ ma_result ma_device_write__audio4(ma_device* pDevice, const void* pPCMFrames, ma return MA_SUCCESS; } -ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) { int result; @@ -22346,7 +23069,7 @@ ma_result ma_device_read__audio4(ma_device* pDevice, void* pPCMFrames, ma_uint32 return MA_SUCCESS; } -ma_result ma_device_main_loop__audio4(ma_device* pDevice) +static ma_result ma_device_main_loop__audio4(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = MA_FALSE; @@ -22359,77 +23082,88 @@ ma_result ma_device_main_loop__audio4(ma_device* pDevice) case ma_device_type_duplex: { /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / 
ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__audio4(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = ma_device_read__audio4(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - pDevice->capture._dspFrameCount = framesToProcess; - pDevice->capture._dspFrames = capturedDeviceData; + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; for (;;) { - ma_uint8 capturedData[8192]; - ma_uint8 playbackData[8192]; - ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames); - ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing); - if (capturedFramesToProcess == 0) { - break; /* Don't fire the data callback with zero frames. */ + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; } - ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess); + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */ - /* At this point the playbackData buffer should be holding data that needs to be written to the device. 
*/ - pDevice->playback._dspFrameCount = capturedFramesToProcess; - pDevice->playback._dspFrames = playbackData; + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. */ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__audio4(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__audio4(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__audio4()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } break; @@ -22500,18 +23234,18 @@ ma_result ma_device_main_loop__audio4(ma_device* pDevice) return result; } -ma_result ma_context_uninit__audio4(ma_context* pContext) +static ma_result ma_context_uninit__audio4(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_audio4); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_audio4); (void)pContext; return MA_SUCCESS; } -ma_result ma_context_init__audio4(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__audio4(const ma_context_config* pConfig, ma_context* pContext) { - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)pConfig; @@ -22545,7 +23279,7 @@ OSS Backend #define SNDCTL_DSP_HALT SNDCTL_DSP_RESET #endif -int ma_open_temp_device__oss() +static int ma_open_temp_device__oss() { /* The OSS sample code uses "/dev/mixer" as the device for getting system properties so I'm going to do the same. 
*/ int fd = open("/dev/mixer", O_RDONLY, 0); @@ -22556,13 +23290,13 @@ int ma_open_temp_device__oss() return -1; } -ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd) +static ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, int* pfd) { const char* deviceName; int flags; - ma_assert(pContext != NULL); - ma_assert(pfd != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pfd != NULL); (void)pContext; *pfd = -1; @@ -22590,24 +23324,24 @@ ma_result ma_context_open_device__oss(ma_context* pContext, ma_device_type devic return MA_SUCCESS; } -ma_bool32 ma_context_is_device_id_equal__oss(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__oss(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return ma_strcmp(pID0->oss, pID1->oss) == 0; } -ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { int fd; oss_sysinfo si; int result; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); fd = ma_open_temp_device__oss(); if (fd == -1) { @@ -22626,7 +23360,7 @@ ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_device ma_device_info deviceInfo; ma_bool32 isTerminating = MA_FALSE; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); /* ID */ ma_strncpy_s(deviceInfo.id.oss, sizeof(deviceInfo.id.oss), ai.devnode, (size_t)-1); @@ -22665,14 +23399,14 @@ ma_result ma_context_enumerate_devices__oss(ma_context* pContext, ma_enum_device return MA_SUCCESS; } -ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_bool32 foundDevice; int fdTemp; oss_sysinfo si; int result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)shareMode; /* Handle the default device a little differently. 
*/ @@ -22766,9 +23500,9 @@ ma_result ma_context_get_device_info__oss(ma_context* pContext, ma_device_type d return MA_SUCCESS; } -void ma_device_uninit__oss(ma_device* pDevice) +static void ma_device_uninit__oss(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { close(pDevice->oss.fdCapture); @@ -22779,7 +23513,7 @@ void ma_device_uninit__oss(ma_device* pDevice) } } -int ma_format_to_oss(ma_format format) +static int ma_format_to_oss(ma_format format) { int ossFormat = AFMT_U8; switch (format) { @@ -22794,7 +23528,7 @@ int ma_format_to_oss(ma_format format) return ossFormat; } -ma_format ma_format_from_oss(int ossFormat) +static ma_format ma_format_from_oss(int ossFormat) { if (ossFormat == AFMT_U8) { return ma_format_u8; @@ -22817,7 +23551,7 @@ ma_format ma_format_from_oss(int ossFormat) return ma_format_unknown; } -ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +static ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) { ma_result result; int ossResult; @@ -22829,10 +23563,10 @@ ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* p int ossSampleRate; int ossFragment; - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(deviceType != ma_device_type_duplex); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); (void)pContext; @@ -22951,13 +23685,13 @@ ma_result ma_device_init_fd__oss(ma_context* pContext, const ma_device_config* p return MA_SUCCESS; } -ma_result ma_device_init__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__oss(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(pDevice != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDevice != NULL); - ma_zero_object(&pDevice->oss); + MA_ZERO_OBJECT(&pDevice->oss); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -22980,9 +23714,9 @@ ma_result ma_device_init__oss(ma_context* pContext, const ma_device_config* pCon return MA_SUCCESS; } -ma_result ma_device_stop__oss(ma_device* pDevice) +static ma_result ma_device_stop__oss(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); /* We want to use SNDCTL_DSP_HALT. 
From the documentation: @@ -23014,7 +23748,7 @@ ma_result ma_device_stop__oss(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) +static ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesWritten) { int resultOSS; @@ -23034,7 +23768,7 @@ ma_result ma_device_write__oss(ma_device* pDevice, const void* pPCMFrames, ma_ui return MA_SUCCESS; } -ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) +static ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 frameCount, ma_uint32* pFramesRead) { int resultOSS; @@ -23054,7 +23788,7 @@ ma_result ma_device_read__oss(ma_device* pDevice, void* pPCMFrames, ma_uint32 fr return MA_SUCCESS; } -ma_result ma_device_main_loop__oss(ma_device* pDevice) +static ma_result ma_device_main_loop__oss(ma_device* pDevice) { ma_result result = MA_SUCCESS; ma_bool32 exitLoop = MA_FALSE; @@ -23067,84 +23801,95 @@ ma_result ma_device_main_loop__oss(ma_device* pDevice) case ma_device_type_duplex: { /* The process is: device_read -> convert -> callback -> convert -> device_write */ - ma_uint8 capturedDeviceData[8192]; - ma_uint8 playbackDeviceData[8192]; - ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - - ma_uint32 totalFramesProcessed = 0; - ma_uint32 periodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); + ma_uint32 totalCapturedDeviceFramesProcessed = 0; + ma_uint32 capturedDevicePeriodSizeInFrames = ma_min(pDevice->capture.internalBufferSizeInFrames/pDevice->capture.internalPeriods, pDevice->playback.internalBufferSizeInFrames/pDevice->playback.internalPeriods); - while (totalFramesProcessed < periodSizeInFrames) { - ma_uint32 framesRemaining = periodSizeInFrames - totalFramesProcessed; - ma_uint32 framesProcessed; - ma_uint32 framesToProcess = framesRemaining; - if (framesToProcess > capturedDeviceDataCapInFrames) { - framesToProcess = capturedDeviceDataCapInFrames; + while (totalCapturedDeviceFramesProcessed < capturedDevicePeriodSizeInFrames) { + ma_uint8 capturedDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackDeviceData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedDeviceDataCapInFrames = sizeof(capturedDeviceData) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + ma_uint32 playbackDeviceDataCapInFrames = sizeof(playbackDeviceData) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + ma_uint32 capturedDeviceFramesRemaining; + ma_uint32 capturedDeviceFramesProcessed; + ma_uint32 capturedDeviceFramesToProcess; + ma_uint32 capturedDeviceFramesToTryProcessing = capturedDevicePeriodSizeInFrames - totalCapturedDeviceFramesProcessed; + if (capturedDeviceFramesToTryProcessing > capturedDeviceDataCapInFrames) { + capturedDeviceFramesToTryProcessing = capturedDeviceDataCapInFrames; } - result = ma_device_read__oss(pDevice, capturedDeviceData, framesToProcess, &framesProcessed); + result = 
ma_device_read__oss(pDevice, capturedDeviceData, capturedDeviceFramesToTryProcessing, &capturedDeviceFramesToProcess); if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - pDevice->capture._dspFrameCount = framesToProcess; - pDevice->capture._dspFrames = capturedDeviceData; + capturedDeviceFramesRemaining = capturedDeviceFramesToProcess; + capturedDeviceFramesProcessed = 0; for (;;) { - ma_uint8 capturedData[8192]; - ma_uint8 playbackData[8192]; - ma_uint32 capturedDataCapInFrames = sizeof(capturedData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); - ma_uint32 playbackDataCapInFrames = sizeof(playbackData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); - - ma_uint32 capturedFramesToTryProcessing = ma_min(capturedDataCapInFrames, playbackDataCapInFrames); - ma_uint32 capturedFramesToProcess = (ma_uint32)ma_pcm_converter_read(&pDevice->capture.converter, capturedData, capturedFramesToTryProcessing); - if (capturedFramesToProcess == 0) { - break; /* Don't fire the data callback with zero frames. */ + ma_uint8 capturedClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint8 playbackClientData[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + ma_uint32 capturedClientDataCapInFrames = sizeof(capturedClientData) / ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels); + ma_uint32 playbackClientDataCapInFrames = sizeof(playbackClientData) / ma_get_bytes_per_frame(pDevice->playback.format, pDevice->playback.channels); + ma_uint64 capturedClientFramesToProcessThisIteration = ma_min(capturedClientDataCapInFrames, playbackClientDataCapInFrames); + ma_uint64 capturedDeviceFramesToProcessThisIteration = capturedDeviceFramesRemaining; + ma_uint8* pRunningCapturedDeviceFrames = ma_offset_ptr(capturedDeviceData, capturedDeviceFramesProcessed * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels)); + + /* Convert capture data from device format to client format. */ + result = ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pRunningCapturedDeviceFrames, &capturedDeviceFramesToProcessThisIteration, capturedClientData, &capturedClientFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + break; + } + + /* + If we weren't able to generate any output frames it must mean we've exhausted all of our input. The only time this would not be the case is if capturedClientData was too small + which should never be the case when it's of the size MA_DATA_CONVERTER_STACK_BUFFER_SIZE. + */ + if (capturedClientFramesToProcessThisIteration == 0) { + break; } - ma_device__on_data(pDevice, playbackData, capturedData, capturedFramesToProcess); + ma_device__on_data(pDevice, playbackClientData, capturedClientData, (ma_uint32)capturedClientFramesToProcessThisIteration); /* Safe cast. */ - /* At this point the playbackData buffer should be holding data that needs to be written to the device. */ - pDevice->playback._dspFrameCount = capturedFramesToProcess; - pDevice->playback._dspFrames = playbackData; + capturedDeviceFramesProcessed += (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + capturedDeviceFramesRemaining -= (ma_uint32)capturedDeviceFramesToProcessThisIteration; /* Safe cast. */ + + /* At this point the playbackClientData buffer should be holding data that needs to be written to the device. 
*/ for (;;) { - ma_uint32 playbackDeviceFramesCount = (ma_uint32)ma_pcm_converter_read(&pDevice->playback.converter, playbackDeviceData, playbackDeviceDataCapInFrames); - if (playbackDeviceFramesCount == 0) { + ma_uint64 convertedClientFrameCount = capturedClientFramesToProcessThisIteration; + ma_uint64 convertedDeviceFrameCount = playbackDeviceDataCapInFrames; + result = ma_data_converter_process_pcm_frames(&pDevice->playback.converter, playbackClientData, &convertedClientFrameCount, playbackDeviceData, &convertedDeviceFrameCount); + if (result != MA_SUCCESS) { break; } - result = ma_device_write__oss(pDevice, playbackDeviceData, playbackDeviceFramesCount, NULL); + result = ma_device_write__oss(pDevice, playbackDeviceData, (ma_uint32)convertedDeviceFrameCount, NULL); /* Safe cast. */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } - if (playbackDeviceFramesCount < playbackDeviceDataCapInFrames) { + capturedClientFramesToProcessThisIteration -= (ma_uint32)convertedClientFrameCount; /* Safe cast. */ + if (capturedClientFramesToProcessThisIteration == 0) { break; } } - if (capturedFramesToProcess < capturedFramesToTryProcessing) { - break; - } - - /* In case an error happened from ma_device_write2__alsa()... */ + /* In case an error happened from ma_device_write__oss()... */ if (result != MA_SUCCESS) { exitLoop = MA_TRUE; break; } } - totalFramesProcessed += framesProcessed; + totalCapturedDeviceFramesProcessed += capturedDeviceFramesProcessed; } } break; case ma_device_type_capture: { /* We read in chunks of the period size, but use a stack allocated buffer for the intermediary. */ - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); ma_uint32 periodSizeInFrames = pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods; ma_uint32 framesReadThisPeriod = 0; @@ -23171,7 +23916,7 @@ ma_result ma_device_main_loop__oss(ma_device* pDevice) case ma_device_type_playback: { /* We write in chunks of the period size, but use a stack allocated buffer for the intermediary. 
*/ - ma_uint8 intermediaryBuffer[8192]; + ma_uint8 intermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; ma_uint32 intermediaryBufferSizeInFrames = sizeof(intermediaryBuffer) / ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); ma_uint32 periodSizeInFrames = pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods; ma_uint32 framesWrittenThisPeriod = 0; @@ -23208,22 +23953,22 @@ ma_result ma_device_main_loop__oss(ma_device* pDevice) return result; } -ma_result ma_context_uninit__oss(ma_context* pContext) +static ma_result ma_context_uninit__oss(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_oss); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_oss); (void)pContext; return MA_SUCCESS; } -ma_result ma_context_init__oss(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__oss(const ma_context_config* pConfig, ma_context* pContext) { int fd; int ossVersion; int result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); (void)pConfig; @@ -23322,7 +24067,8 @@ typedef int32_t ma_aaudio_data_callback_result_t; typedef struct ma_AAudioStreamBuilder_t* ma_AAudioStreamBuilder; typedef struct ma_AAudioStream_t* ma_AAudioStream; -typedef ma_aaudio_data_callback_result_t (*ma_AAudioStream_dataCallback)(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames); +typedef ma_aaudio_data_callback_result_t (* ma_AAudioStream_dataCallback) (ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t numFrames); +typedef void (* ma_AAudioStream_errorCallback)(ma_AAudioStream *pStream, void *pUserData, ma_aaudio_result_t error); typedef ma_aaudio_result_t (* MA_PFN_AAudio_createStreamBuilder) (ma_AAudioStreamBuilder** ppBuilder); typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_delete) (ma_AAudioStreamBuilder* pBuilder); @@ -23335,6 +24081,7 @@ typedef void (* MA_PFN_AAudioStreamBuilder_setSampleRate) typedef void (* MA_PFN_AAudioStreamBuilder_setBufferCapacityInFrames)(ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); typedef void (* MA_PFN_AAudioStreamBuilder_setFramesPerDataCallback) (ma_AAudioStreamBuilder* pBuilder, int32_t numFrames); typedef void (* MA_PFN_AAudioStreamBuilder_setDataCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_dataCallback callback, void* pUserData); +typedef void (* MA_PFN_AAudioStreamBuilder_setErrorCallback) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream_errorCallback callback, void* pUserData); typedef void (* MA_PFN_AAudioStreamBuilder_setPerformanceMode) (ma_AAudioStreamBuilder* pBuilder, ma_aaudio_performance_mode_t mode); typedef ma_aaudio_result_t (* MA_PFN_AAudioStreamBuilder_openStream) (ma_AAudioStreamBuilder* pBuilder, ma_AAudioStream** ppStream); typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_close) (ma_AAudioStream* pStream); @@ -23349,7 +24096,7 @@ typedef int32_t (* MA_PFN_AAudioStream_getFramesPerBurst) typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStart) (ma_AAudioStream* pStream); typedef ma_aaudio_result_t (* MA_PFN_AAudioStream_requestStop) (ma_AAudioStream* pStream); -ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA) +static ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA) { switch (resultAA) { @@ -23360,10 +24107,32 @@ ma_result ma_result_from_aaudio(ma_aaudio_result_t resultAA) return MA_ERROR; } -ma_aaudio_data_callback_result_t 
ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +static void ma_stream_error_callback__aaudio(ma_AAudioStream* pStream, void* pUserData, ma_aaudio_result_t error) +{ + ma_device* pDevice = (ma_device*)pUserData; + MA_ASSERT(pDevice != NULL); + + (void)error; + +#if defined(MA_DEBUG_OUTPUT) + printf("[AAudio] ERROR CALLBACK: error=%d, AAudioStream_getState()=%d\n", error, ((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream)); +#endif + + /* + From the documentation for AAudio, when a device is disconnected all we can do is stop it. However, we cannot stop it from the callback - we need + to do it from another thread. Therefore we are going to use an event thread for the AAudio backend to do this cleanly and safely. + */ + if (((MA_PFN_AAudioStream_getState)pDevice->pContext->aaudio.AAudioStream_getState)(pStream) == MA_AAUDIO_STREAM_STATE_DISCONNECTED) { +#if defined(MA_DEBUG_OUTPUT) + printf("[AAudio] Device Disconnected.\n"); +#endif + } +} + +static ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) { ma_device* pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_duplex) { ma_device__handle_duplex_callback_capture(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB); @@ -23375,10 +24144,10 @@ ma_aaudio_data_callback_result_t ma_stream_data_callback_capture__aaudio(ma_AAud return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; } -ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) +static ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAudioStream* pStream, void* pUserData, void* pAudioData, int32_t frameCount) { ma_device* pDevice = (ma_device*)pUserData; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_duplex) { ma_device__handle_duplex_callback_playback(pDevice, frameCount, pAudioData, &pDevice->aaudio.duplexRB); @@ -23390,12 +24159,12 @@ ma_aaudio_data_callback_result_t ma_stream_data_callback_playback__aaudio(ma_AAu return MA_AAUDIO_CALLBACK_RESULT_CONTINUE; } -ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, const ma_device_config* pConfig, const ma_device* pDevice, ma_AAudioStream** ppStream) +static ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, const ma_device_config* pConfig, const ma_device* pDevice, ma_AAudioStream** ppStream) { ma_AAudioStreamBuilder* pBuilder; ma_aaudio_result_t resultAA; - ma_assert(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */ + MA_ASSERT(deviceType != ma_device_type_duplex); /* This function should not be called for a full-duplex device type. */ *ppStream = NULL; @@ -23453,6 +24222,8 @@ ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType ((MA_PFN_AAudioStreamBuilder_setPerformanceMode)pContext->aaudio.AAudioStreamBuilder_setPerformanceMode)(pBuilder, (pConfig->performanceProfile == ma_performance_profile_low_latency) ? 
MA_AAUDIO_PERFORMANCE_MODE_LOW_LATENCY : MA_AAUDIO_PERFORMANCE_MODE_NONE); } + ((MA_PFN_AAudioStreamBuilder_setErrorCallback)pContext->aaudio.AAudioStreamBuilder_setErrorCallback)(pBuilder, ma_stream_error_callback__aaudio, (void*)pDevice); + resultAA = ((MA_PFN_AAudioStreamBuilder_openStream)pContext->aaudio.AAudioStreamBuilder_openStream)(pBuilder, ppStream); if (resultAA != MA_AAUDIO_OK) { *ppStream = NULL; @@ -23464,12 +24235,12 @@ ma_result ma_open_stream__aaudio(ma_context* pContext, ma_device_type deviceType return MA_SUCCESS; } -ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream) +static ma_result ma_close_stream__aaudio(ma_context* pContext, ma_AAudioStream* pStream) { return ma_result_from_aaudio(((MA_PFN_AAudioStream_close)pContext->aaudio.AAudioStream_close)(pStream)); } -ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType) +static ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type deviceType) { /* The only way to know this is to try creating a stream. */ ma_AAudioStream* pStream; @@ -23482,7 +24253,7 @@ ma_bool32 ma_has_default_device__aaudio(ma_context* pContext, ma_device_type dev return MA_TRUE; } -ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState) +static ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_AAudioStream* pStream, ma_aaudio_stream_state_t oldState, ma_aaudio_stream_state_t newState) { ma_aaudio_stream_state_t actualNewState; ma_aaudio_result_t resultAA = ((MA_PFN_AAudioStream_waitForStateChange)pContext->aaudio.AAudioStream_waitForStateChange)(pStream, oldState, &actualNewState, 5000000000); /* 5 second timeout. */ @@ -23498,29 +24269,29 @@ ma_result ma_wait_for_simple_state_transition__aaudio(ma_context* pContext, ma_A } -ma_bool32 ma_context_is_device_id_equal__aaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +static ma_bool32 ma_context_is_device_id_equal__aaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); (void)pContext; return pID0->aaudio == pID1->aaudio; } -ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { ma_bool32 cbResult = MA_TRUE; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); /* Unfortunately AAudio does not have an enumeration API. Therefore I'm only going to report default devices, but only if it can instantiate a stream. */ /* Playback. */ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); @@ -23532,7 +24303,7 @@ ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_dev /* Capture. 
*/ if (cbResult) { ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); + MA_ZERO_OBJECT(&deviceInfo); deviceInfo.id.aaudio = MA_AAUDIO_UNSPECIFIED; ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); @@ -23544,12 +24315,12 @@ ma_result ma_context_enumerate_devices__aaudio(ma_context* pContext, ma_enum_dev return MA_SUCCESS; } -ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) { ma_AAudioStream* pStream; ma_result result; - ma_assert(pContext != NULL); + MA_ASSERT(pContext != NULL); /* No exclusive mode with AAudio. */ if (shareMode == ma_share_mode_exclusive) { @@ -23595,9 +24366,9 @@ ma_result ma_context_get_device_info__aaudio(ma_context* pContext, ma_device_typ } -void ma_device_uninit__aaudio(ma_device* pDevice) +static void ma_device_uninit__aaudio(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); @@ -23614,11 +24385,11 @@ void ma_device_uninit__aaudio(ma_device* pDevice) } } -ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pConfig->deviceType == ma_device_type_loopback) { return MA_DEVICE_TYPE_NOT_SUPPORTED; @@ -23677,7 +24448,7 @@ ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* p if (pConfig->deviceType == ma_device_type_duplex) { ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames); - ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->aaudio.duplexRB); + ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->aaudio.duplexRB); if (result != MA_SUCCESS) { if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ma_close_stream__aaudio(pDevice->pContext, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); @@ -23694,7 +24465,7 @@ ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* p void* pMarginData; ma_pcm_rb_acquire_write(&pDevice->aaudio.duplexRB, &marginSizeInFrames, &pMarginData); { - ma_zero_memory(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); } ma_pcm_rb_commit_write(&pDevice->aaudio.duplexRB, marginSizeInFrames, pMarginData); } @@ -23703,12 +24474,12 @@ ma_result ma_device_init__aaudio(ma_context* pContext, const ma_device_config* p return MA_SUCCESS; } -ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +static ma_result 
ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) { ma_aaudio_result_t resultAA; ma_aaudio_stream_state_t currentState; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); resultAA = ((MA_PFN_AAudioStream_requestStart)pDevice->pContext->aaudio.AAudioStream_requestStart)(pStream); if (resultAA != MA_AAUDIO_OK) { @@ -23735,12 +24506,12 @@ ma_result ma_device_start_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pS return MA_SUCCESS; } -ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) +static ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pStream) { ma_aaudio_result_t resultAA; ma_aaudio_stream_state_t currentState; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); resultAA = ((MA_PFN_AAudioStream_requestStop)pDevice->pContext->aaudio.AAudioStream_requestStop)(pStream); if (resultAA != MA_AAUDIO_OK) { @@ -23765,9 +24536,9 @@ ma_result ma_device_stop_stream__aaudio(ma_device* pDevice, ma_AAudioStream* pSt return MA_SUCCESS; } -ma_result ma_device_start__aaudio(ma_device* pDevice) +static ma_result ma_device_start__aaudio(ma_device* pDevice) { - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ma_result result = ma_device_start_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); @@ -23789,11 +24560,11 @@ ma_result ma_device_start__aaudio(ma_device* pDevice) return MA_SUCCESS; } -ma_result ma_device_stop__aaudio(ma_device* pDevice) +static ma_result ma_device_stop__aaudio(ma_device* pDevice) { ma_stop_proc onStop; - ma_assert(pDevice != NULL); + MA_ASSERT(pDevice != NULL); if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { ma_result result = ma_device_stop_stream__aaudio(pDevice, (ma_AAudioStream*)pDevice->aaudio.pStreamCapture); @@ -23818,10 +24589,10 @@ ma_result ma_device_stop__aaudio(ma_device* pDevice) } -ma_result ma_context_uninit__aaudio(ma_context* pContext) +static ma_result ma_context_uninit__aaudio(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_aaudio); + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_aaudio); ma_dlclose(pContext, pContext->aaudio.hAAudio); pContext->aaudio.hAAudio = NULL; @@ -23829,7 +24600,7 @@ ma_result ma_context_uninit__aaudio(ma_context* pContext) return MA_SUCCESS; } -ma_result ma_context_init__aaudio(const ma_context_config* pConfig, ma_context* pContext) +static ma_result ma_context_init__aaudio(const ma_context_config* pConfig, ma_context* pContext) { const char* libNames[] = { "libaaudio.so" @@ -23858,6 +24629,7 @@ ma_result ma_context_init__aaudio(const ma_context_config* pConfig, ma_context* pContext->aaudio.AAudioStreamBuilder_setBufferCapacityInFrames = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setBufferCapacityInFrames"); pContext->aaudio.AAudioStreamBuilder_setFramesPerDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setFramesPerDataCallback"); pContext->aaudio.AAudioStreamBuilder_setDataCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setDataCallback"); + pContext->aaudio.AAudioStreamBuilder_setErrorCallback = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_setErrorCallback"); pContext->aaudio.AAudioStreamBuilder_setPerformanceMode = (ma_proc)ma_dlsym(pContext, 
pContext->aaudio.hAAudio, "AAudioStreamBuilder_setPerformanceMode"); pContext->aaudio.AAudioStreamBuilder_openStream = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStreamBuilder_openStream"); pContext->aaudio.AAudioStream_close = (ma_proc)ma_dlsym(pContext, pContext->aaudio.hAAudio, "AAudioStream_close"); @@ -23917,7 +24689,7 @@ ma_uint32 g_maOpenSLInitCounter = 0; #endif /* Converts an individual OpenSL-style channel identifier (SL_SPEAKER_FRONT_LEFT, etc.) to miniaudio. */ -ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id) +static ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id) { switch (id) { @@ -23944,7 +24716,7 @@ ma_uint8 ma_channel_id_to_ma__opensl(SLuint32 id) } /* Converts an individual miniaudio channel identifier (MA_CHANNEL_FRONT_LEFT, etc.) to OpenSL-style. */ -SLuint32 ma_channel_id_to_opensl(ma_uint8 id) +static SLuint32 ma_channel_id_to_opensl(ma_uint8 id) { switch (id) { @@ -23972,7 +24744,7 @@ SLuint32 ma_channel_id_to_opensl(ma_uint8 id) } /* Converts a channel mapping to an OpenSL-style channel mask. */ -SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels) +static SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel channelMap[MA_MAX_CHANNELS], ma_uint32 channels) { SLuint32 channelMask = 0; ma_uint32 iChannel; @@ -23984,7 +24756,7 @@ SLuint32 ma_channel_map_to_channel_mask__opensl(const ma_channel channelMap[MA_M } /* Converts an OpenSL-style channel mask to a miniaudio channel map. */ -void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +static void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { if (channels == 1 && channelMask == 0) { channelMap[0] = MA_CHANNEL_MONO; @@ -24010,7 +24782,7 @@ void ma_channel_mask_to_channel_map__opensl(SLuint32 channelMask, ma_uint32 chan } } -SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec) +static SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec) { if (samplesPerSec <= SL_SAMPLINGRATE_8) { return SL_SAMPLINGRATE_8; @@ -24051,1817 +24823,2418 @@ SLuint32 ma_round_to_standard_sample_rate__opensl(SLuint32 samplesPerSec) if (samplesPerSec <= SL_SAMPLINGRATE_96) { return SL_SAMPLINGRATE_96; } - if (samplesPerSec <= SL_SAMPLINGRATE_192) { - return SL_SAMPLINGRATE_192; + if (samplesPerSec <= SL_SAMPLINGRATE_192) { + return SL_SAMPLINGRATE_192; + } +#endif + + return SL_SAMPLINGRATE_16; +} + + +static ma_bool32 ma_context_is_device_id_equal__opensl(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; + + return pID0->opensl == pID1->opensl; +} + +static ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_bool32 cbResult; + + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* + TODO: Test Me. + + This is currently untested, so for now we are just returning default devices. 
+ */ +#if 0 && !defined(MA_ANDROID) + ma_bool32 isTerminated = MA_FALSE; + + SLuint32 pDeviceIDs[128]; + SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]); + + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. */ + goto return_default_device; + } + + /* Playback */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return MA_NO_DEVICE; + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + /* Capture */ + if (!isTerminated) { + resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs); + if (resultSL != SL_RESULT_SUCCESS) { + return MA_NO_DEVICE; + } + + for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + deviceInfo.id.opensl = pDeviceIDs[iDevice]; + + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); + if (resultSL == SL_RESULT_SUCCESS) { + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1); + + ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MA_FALSE) { + isTerminated = MA_TRUE; + break; + } + } + } + } + + return MA_SUCCESS; +#else + goto return_default_device; +#endif + +return_default_device:; + cbResult = MA_TRUE; + + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + } + + /* Capture. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } + + return MA_SUCCESS; +} + +static ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. */ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; + } + + /* No exclusive mode with OpenSL|ES. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } + + /* + TODO: Test Me. 
+ + This is currently untested, so for now we are just returning default devices. + */ +#if 0 && !defined(MA_ANDROID) + SLAudioIODeviceCapabilitiesItf deviceCaps; + SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + if (resultSL != SL_RESULT_SUCCESS) { + /* The interface may not be supported so just report a default device. */ + goto return_default_device; + } + + if (deviceType == ma_device_type_playback) { + SLAudioOutputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return MA_NO_DEVICE; + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1); + } else { + SLAudioInputDescriptor desc; + resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc); + if (resultSL != SL_RESULT_SUCCESS) { + return MA_NO_DEVICE; + } + + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1); + } + + goto return_detailed_info; +#else + goto return_default_device; +#endif + +return_default_device: + if (pDeviceID != NULL) { + if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) || + (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) { + return MA_NO_DEVICE; /* Don't know the device. */ + } + } + + /* Name / Description */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); } + + goto return_detailed_info; + + +return_detailed_info: + + /* + For now we're just outputting a set of values that are supported by the API but not necessarily supported + by the device natively. Later on we should work on this so that it more closely reflects the device's + actual native format. + */ + pDeviceInfo->minChannels = 1; + pDeviceInfo->maxChannels = 2; + pDeviceInfo->minSampleRate = 8000; + pDeviceInfo->maxSampleRate = 48000; + pDeviceInfo->formatCount = 2; + pDeviceInfo->formats[0] = ma_format_u8; + pDeviceInfo->formats[1] = ma_format_s16; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + pDeviceInfo->formats[pDeviceInfo->formatCount] = ma_format_f32; + pDeviceInfo->formatCount += 1; #endif - return SL_SAMPLINGRATE_16; + return MA_SUCCESS; } -ma_bool32 ma_context_is_device_id_equal__opensl(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +#ifdef MA_ANDROID +/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/ +static void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) { - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); - (void)pContext; + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; - return pID0->opensl == pID1->opensl; + MA_ASSERT(pDevice != NULL); + + (void)pBufferQueue; + + /* + For now, don't do anything unless the buffer was fully processed. 
From what I can tell, it looks like + OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this, + but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :( + */ + + /* Don't do anything if the device is not started. */ + if (pDevice->state != MA_STATE_STARTED) { + return; + } + + periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes); + + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods), pBuffer, &pDevice->opensl.duplexRB); + } else { + ma_device__send_frames_to_client(pDevice, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods), pBuffer); + } + + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + return; + } + + pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods; } -ma_result ma_context_enumerate_devices__opensl(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) { - ma_bool32 cbResult; + ma_device* pDevice = (ma_device*)pUserData; + size_t periodSizeInBytes; + ma_uint8* pBuffer; + SLresult resultSL; - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pDevice != NULL); - ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to enumerate devices. */ - if (g_maOpenSLInitCounter == 0) { - return MA_INVALID_OPERATION; + (void)pBufferQueue; + + /* Don't do anything if the device is not started. */ + if (pDevice->state != MA_STATE_STARTED) { + return; } - /* - TODO: Test Me. - - This is currently untested, so for now we are just returning default devices. 
- */ -#if 0 && !defined(MA_ANDROID) - ma_bool32 isTerminated = MA_FALSE; + periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes); - SLuint32 pDeviceIDs[128]; - SLint32 deviceCount = sizeof(pDeviceIDs) / sizeof(pDeviceIDs[0]); + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods), pBuffer, &pDevice->opensl.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods), pBuffer); + } - SLAudioIODeviceCapabilitiesItf deviceCaps; - SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes); if (resultSL != SL_RESULT_SUCCESS) { - /* The interface may not be supported so just report a default device. */ - goto return_default_device; + return; } - /* Playback */ - if (!isTerminated) { - resultSL = (*deviceCaps)->GetAvailableAudioOutputs(deviceCaps, &deviceCount, pDeviceIDs); - if (resultSL != SL_RESULT_SUCCESS) { - return MA_NO_DEVICE; - } + pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods; +} +#endif - for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { - ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); - deviceInfo.id.opensl = pDeviceIDs[iDevice]; +static void ma_device_uninit__opensl(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); - SLAudioOutputDescriptor desc; - resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); - if (resultSL == SL_RESULT_SUCCESS) { - ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.pDeviceName, (size_t)-1); + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return; + } - ma_bool32 cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); - if (cbResult == MA_FALSE) { - isTerminated = MA_TRUE; - break; - } - } + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioRecorderObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); } + + ma__free_from_callbacks(pDevice->opensl.pBufferCapture, &pDevice->pContext->allocationCallbacks); } - /* Capture */ - if (!isTerminated) { - resultSL = (*deviceCaps)->GetAvailableAudioInputs(deviceCaps, &deviceCount, pDeviceIDs); - if (resultSL != SL_RESULT_SUCCESS) { - return MA_NO_DEVICE; + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + if (pDevice->opensl.pAudioPlayerObj) { + MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); + } + if (pDevice->opensl.pOutputMixObj) { + MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); } - for (SLint32 iDevice = 0; iDevice < deviceCount; ++iDevice) { - ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); - deviceInfo.id.opensl = pDeviceIDs[iDevice]; - - SLAudioInputDescriptor desc; - resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, deviceInfo.id.opensl, &desc); - if (resultSL == SL_RESULT_SUCCESS) { - ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), (const char*)desc.deviceName, (size_t)-1); + ma__free_from_callbacks(pDevice->opensl.pBufferPlayback, &pDevice->pContext->allocationCallbacks); + } - ma_bool32 cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); - if (cbResult == MA_FALSE) { - isTerminated = MA_TRUE; - break; - } - } - } + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->opensl.duplexRB); } +} - return MA_SUCCESS; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 +typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM; #else - goto return_default_device; +typedef SLDataFormat_PCM ma_SLDataFormat_PCM; #endif -return_default_device:; - cbResult = MA_TRUE; +static ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat) +{ +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (format == ma_format_f32) { + pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX; + pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT; + } else { + pDataFormat->formatType = SL_DATAFORMAT_PCM; + } +#else + pDataFormat->formatType = SL_DATAFORMAT_PCM; +#endif - /* Playback. */ - if (cbResult) { - ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); - ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); - cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + pDataFormat->numChannels = channels; + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */ + pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format)*8; + pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels); + pDataFormat->endianness = (ma_is_little_endian()) ? 
SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN; + + /* + Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html + - Only mono and stereo is supported. + - Only u8 and s16 formats are supported. + - Maximum sample rate of 48000. + */ +#ifdef MA_ANDROID + if (pDataFormat->numChannels > 2) { + pDataFormat->numChannels = 2; + } +#if __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + /* It's floating point. */ + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + if (pDataFormat->bitsPerSample > 32) { + pDataFormat->bitsPerSample = 32; + } + } else { + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } + } +#else + if (pDataFormat->bitsPerSample > 16) { + pDataFormat->bitsPerSample = 16; + } +#endif + if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) { + ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48; } +#endif - /* Capture. */ - if (cbResult) { - ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); - ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); - cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. */ + + return MA_SUCCESS; +} + +static ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap) +{ + ma_bool32 isFloatingPoint = MA_FALSE; +#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 + if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { + MA_ASSERT(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); + isFloatingPoint = MA_TRUE; + } +#endif + if (isFloatingPoint) { + if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_f32; + } + } else { + if (pDataFormat->bitsPerSample == 8) { + *pFormat = ma_format_u8; + } else if (pDataFormat->bitsPerSample == 16) { + *pFormat = ma_format_s16; + } else if (pDataFormat->bitsPerSample == 24) { + *pFormat = ma_format_s24; + } else if (pDataFormat->bitsPerSample == 32) { + *pFormat = ma_format_s32; + } } + *pChannels = pDataFormat->numChannels; + *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000; + ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, pDataFormat->numChannels, pChannelMap); + return MA_SUCCESS; } -ma_result ma_context_get_device_info__opensl(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_device_init__opensl(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { - ma_assert(pContext != NULL); +#ifdef MA_ANDROID + SLDataLocator_AndroidSimpleBufferQueue queue; + SLresult resultSL; + ma_uint32 bufferSizeInFrames; + size_t bufferSizeInBytes; + const SLInterfaceID itfIDs1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE}; + const SLboolean itfIDsRequired1[] = {SL_BOOLEAN_TRUE}; +#endif + + (void)pContext; - ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to get device info. 
*/ + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */ if (g_maOpenSLInitCounter == 0) { return MA_INVALID_OPERATION; } - /* No exclusive mode with OpenSL|ES. */ - if (shareMode == ma_share_mode_exclusive) { - return MA_SHARE_MODE_NOT_SUPPORTED; + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; } /* - TODO: Test Me. - - This is currently untested, so for now we are just returning default devices. + For now, only supporting Android implementations of OpenSL|ES since that's the only one I've + been able to test with and I currently depend on Android-specific extensions (simple buffer + queues). */ -#if 0 && !defined(MA_ANDROID) - SLAudioIODeviceCapabilitiesItf deviceCaps; - SLresult resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_AUDIOIODEVICECAPABILITIES, &deviceCaps); - if (resultSL != SL_RESULT_SUCCESS) { - /* The interface may not be supported so just report a default device. */ - goto return_default_device; +#ifdef MA_ANDROID + /* No exclusive mode with OpenSL|ES. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; } - if (deviceType == ma_device_type_playback) { - SLAudioOutputDescriptor desc; - resultSL = (*deviceCaps)->QueryAudioOutputCapabilities(deviceCaps, pDeviceID->opensl, &desc); - if (resultSL != SL_RESULT_SUCCESS) { - return MA_NO_DEVICE; + /* Now we can start initializing the device properly. */ + MA_ASSERT(pDevice != NULL); + MA_ZERO_OBJECT(&pDevice->opensl); + + queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; + queue.numBuffers = pConfig->periods; + + + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataLocator_IODevice locatorDevice; + SLDataSource source; + SLDataSink sink; + + ma_SLDataFormat_PCM_init__opensl(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &pcm); + + locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE; + locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT; + locatorDevice.deviceID = (pConfig->capture.pDeviceID == NULL) ? SL_DEFAULTDEVICEID_AUDIOINPUT : pConfig->capture.pDeviceID->opensl; + locatorDevice.device = NULL; + + source.pLocator = &locatorDevice; + source.pFormat = NULL; + + sink.pLocator = &queue; + sink.pFormat = (SLDataFormat_PCM*)&pcm; + + resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 1; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */ + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. 
*/ + pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); } - ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.pDeviceName, (size_t)-1); - } else { - SLAudioInputDescriptor desc; - resultSL = (*deviceCaps)->QueryAudioInputCapabilities(deviceCaps, pDeviceID->opensl, &desc); if (resultSL != SL_RESULT_SUCCESS) { - return MA_NO_DEVICE; + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); } - ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), (const char*)desc.deviceName, (size_t)-1); - } + if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - goto return_detailed_info; -#else - goto return_default_device; -#endif + if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_RECORD, &pDevice->opensl.pAudioRecorder) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } -return_default_device: - if (pDeviceID != NULL) { - if ((deviceType == ma_device_type_playback && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOOUTPUT) || - (deviceType == ma_device_type_capture && pDeviceID->opensl != SL_DEFAULTDEVICEID_AUDIOINPUT)) { - return MA_NO_DEVICE; /* Don't know the device. */ + if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); } - } - /* Name / Description */ - if (deviceType == ma_device_type_playback) { - ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); - } else { - ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + if (MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->capture.internalFormat, &pDevice->capture.internalChannels, &pDevice->capture.internalSampleRate, pDevice->capture.internalChannelMap); + + /* Buffer. 
*/ + bufferSizeInFrames = pConfig->bufferSizeInFrames; + if (bufferSizeInFrames == 0) { + bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pDevice->capture.internalSampleRate); + } + pDevice->capture.internalPeriods = pConfig->periods; + pDevice->capture.internalBufferSizeInFrames = (bufferSizeInFrames / pDevice->capture.internalPeriods) * pDevice->capture.internalPeriods; + pDevice->opensl.currentBufferIndexCapture = 0; + + bufferSizeInBytes = pDevice->capture.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + pDevice->opensl.pBufferCapture = (ma_uint8*)ma__calloc_from_callbacks(bufferSizeInBytes, &pContext->allocationCallbacks); + if (pDevice->opensl.pBufferCapture == NULL) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY); + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes); } - goto return_detailed_info; + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + ma_SLDataFormat_PCM pcm; + SLDataSource source; + SLDataLocator_OutputMix outmixLocator; + SLDataSink sink; + ma_SLDataFormat_PCM_init__opensl(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &pcm); -return_detailed_info: + resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - /* - For now we're just outputting a set of values that are supported by the API but not necessarily supported - by the device natively. Later on we should work on this so that it more closely reflects the device's - actual native format. - */ - pDeviceInfo->minChannels = 1; - pDeviceInfo->maxChannels = 2; - pDeviceInfo->minSampleRate = 8000; - pDeviceInfo->maxSampleRate = 48000; - pDeviceInfo->formatCount = 2; - pDeviceInfo->formats[0] = ma_format_u8; - pDeviceInfo->formats[1] = ma_format_s16; -#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 - pDeviceInfo->formats[pDeviceInfo->formatCount] = ma_format_f32; - pDeviceInfo->formatCount += 1; -#endif + if (MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE)) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - return MA_SUCCESS; -} + if (MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + /* Set the output device. 
*/ + if (pConfig->playback.pDeviceID != NULL) { + SLuint32 deviceID_OpenSL = pConfig->playback.pDeviceID->opensl; + MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL); + } + + source.pLocator = &queue; + source.pFormat = (SLDataFormat_PCM*)&pcm; -#ifdef MA_ANDROID -/*void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, SLuint32 eventFlags, const void* pBuffer, SLuint32 bufferSize, SLuint32 dataUsed, void* pContext)*/ -void ma_buffer_queue_callback_capture__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) -{ - ma_device* pDevice = (ma_device*)pUserData; - size_t periodSizeInBytes; - ma_uint8* pBuffer; - SLresult resultSL; + outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX; + outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj; - ma_assert(pDevice != NULL); + sink.pLocator = &outmixLocator; + sink.pFormat = NULL; - (void)pBufferQueue; + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) { + /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ + pcm.formatType = SL_DATAFORMAT_PCM; + pcm.numChannels = 2; + ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; + pcm.bitsPerSample = 16; + pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */ + pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; + resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); + } - /* - For now, don't do anything unless the buffer was fully processed. From what I can tell, it looks like - OpenSL|ES 1.1 improves on buffer queues to the point that we could much more intelligently handle this, - but unfortunately it looks like Android is only supporting OpenSL|ES 1.0.1 for now :( - */ + if (resultSL != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - /* Don't do anything if the device is not started. 
*/ - if (pDevice->state != MA_STATE_STARTED) { - return; - } + if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - pBuffer = pDevice->opensl.pBufferCapture + (pDevice->opensl.currentBufferIndexCapture * periodSizeInBytes); + if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_PLAY, &pDevice->opensl.pAudioPlayer) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } - if (pDevice->type == ma_device_type_duplex) { - ma_device__handle_duplex_callback_capture(pDevice, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods), pBuffer, &pDevice->opensl.duplexRB); - } else { - ma_device__send_frames_to_client(pDevice, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods), pBuffer); + if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + if (MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice) != SL_RESULT_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + } + + /* The internal format is determined by the "pcm" object. */ + ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->playback.internalFormat, &pDevice->playback.internalChannels, &pDevice->playback.internalSampleRate, pDevice->playback.internalChannelMap); + + /* Buffer. 
*/ + bufferSizeInFrames = pConfig->bufferSizeInFrames; + if (bufferSizeInFrames == 0) { + bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pDevice->playback.internalSampleRate); + } + pDevice->playback.internalPeriods = pConfig->periods; + pDevice->playback.internalBufferSizeInFrames = (bufferSizeInFrames / pDevice->playback.internalPeriods) * pDevice->playback.internalPeriods; + pDevice->opensl.currentBufferIndexPlayback = 0; + + bufferSizeInBytes = pDevice->playback.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + pDevice->opensl.pBufferPlayback = (ma_uint8*)ma__calloc_from_callbacks(bufferSizeInBytes, &pContext->allocationCallbacks); + if (pDevice->opensl.pBufferPlayback == NULL) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY); + } + MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes); } - resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pBuffer, periodSizeInBytes); - if (resultSL != SL_RESULT_SUCCESS) { - return; + if (pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames); + ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->opensl.duplexRB); + if (result != MA_SUCCESS) { + ma_device_uninit__opensl(pDevice); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to initialize ring buffer.", result); + } + + /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */ + { + ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods; + void* pMarginData; + ma_pcm_rb_acquire_write(&pDevice->opensl.duplexRB, &marginSizeInFrames, &pMarginData); + { + MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + } + ma_pcm_rb_commit_write(&pDevice->opensl.duplexRB, marginSizeInFrames, pMarginData); + } } - pDevice->opensl.currentBufferIndexCapture = (pDevice->opensl.currentBufferIndexCapture + 1) % pDevice->capture.internalPeriods; + return MA_SUCCESS; +#else + return MA_NO_BACKEND; /* Non-Android implementations are not supported. */ +#endif } -void ma_buffer_queue_callback_playback__opensl_android(SLAndroidSimpleBufferQueueItf pBufferQueue, void* pUserData) +static ma_result ma_device_start__opensl(ma_device* pDevice) { - ma_device* pDevice = (ma_device*)pUserData; - size_t periodSizeInBytes; - ma_uint8* pBuffer; SLresult resultSL; + size_t periodSizeInBytes; + ma_uint32 iPeriod; - ma_assert(pDevice != NULL); - - (void)pBufferQueue; + MA_ASSERT(pDevice != NULL); - /* Don't do anything if the device is not started. */ - if (pDevice->state != MA_STATE_STARTED) { - return; + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device. 
*/ + if (g_maOpenSLInitCounter == 0) { + return MA_INVALID_OPERATION; } - periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - pBuffer = pDevice->opensl.pBufferPlayback + (pDevice->opensl.currentBufferIndexPlayback * periodSizeInBytes); + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device.", MA_FAILED_TO_START_BACKEND_DEVICE); + } - if (pDevice->type == ma_device_type_duplex) { - ma_device__handle_duplex_callback_playback(pDevice, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods), pBuffer, &pDevice->opensl.duplexRB); - } else { - ma_device__read_frames_from_client(pDevice, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods), pBuffer); + periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); + for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device.", MA_FAILED_TO_START_BACKEND_DEVICE); + } + } } - resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pBuffer, periodSizeInBytes); - if (resultSL != SL_RESULT_SUCCESS) { - return; + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device.", MA_FAILED_TO_START_BACKEND_DEVICE); + } + + /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueu silent buffers. 
*/ + if (pDevice->type == ma_device_type_duplex) { + MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + } else { + ma_device__read_frames_from_client(pDevice, pDevice->playback.internalBufferSizeInFrames, pDevice->opensl.pBufferPlayback); + } + + periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); + for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) { + resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes); + if (resultSL != SL_RESULT_SUCCESS) { + MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device.", MA_FAILED_TO_START_BACKEND_DEVICE); + } + } } - pDevice->opensl.currentBufferIndexPlayback = (pDevice->opensl.currentBufferIndexPlayback + 1) % pDevice->playback.internalPeriods; + return MA_SUCCESS; } -#endif -void ma_device_uninit__opensl(ma_device* pDevice) +static ma_result ma_device_stop__opensl(ma_device* pDevice) { - ma_assert(pDevice != NULL); + SLresult resultSL; + ma_stop_proc onStop; - ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before uninitializing the device. */ + MA_ASSERT(pDevice != NULL); + + MA_ASSERT(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device. */ if (g_maOpenSLInitCounter == 0) { - return; + return MA_INVALID_OPERATION; } + /* TODO: Wait until all buffers have been processed. Hint: Maybe SLAndroidSimpleBufferQueue::GetState() could be used in a loop? 
*/ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - if (pDevice->opensl.pAudioRecorderObj) { - MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); + resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device.", MA_FAILED_TO_STOP_BACKEND_DEVICE); } - ma_free(pDevice->opensl.pBufferCapture); + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture); } if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - if (pDevice->opensl.pAudioPlayerObj) { - MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); - } - if (pDevice->opensl.pOutputMixObj) { - MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); + resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); + if (resultSL != SL_RESULT_SUCCESS) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device.", MA_FAILED_TO_STOP_BACKEND_DEVICE); } - ma_free(pDevice->opensl.pBufferPlayback); + MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback); } - if (pDevice->type == ma_device_type_duplex) { - ma_pcm_rb_uninit(&pDevice->opensl.duplexRB); + /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */ + onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); } + + return MA_SUCCESS; } -#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 -typedef SLAndroidDataFormat_PCM_EX ma_SLDataFormat_PCM; -#else -typedef SLDataFormat_PCM ma_SLDataFormat_PCM; -#endif -ma_result ma_SLDataFormat_PCM_init__opensl(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, const ma_channel* channelMap, ma_SLDataFormat_PCM* pDataFormat) +static ma_result ma_context_uninit__opensl(ma_context* pContext) { -#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 - if (format == ma_format_f32) { - pDataFormat->formatType = SL_ANDROID_DATAFORMAT_PCM_EX; - pDataFormat->representation = SL_ANDROID_PCM_REPRESENTATION_FLOAT; - } else { - pDataFormat->formatType = SL_DATAFORMAT_PCM; + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_opensl); + (void)pContext; + + /* Uninit global data. */ + if (g_maOpenSLInitCounter > 0) { + if (ma_atomic_decrement_32(&g_maOpenSLInitCounter) == 0) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + } } -#else - pDataFormat->formatType = SL_DATAFORMAT_PCM; -#endif - pDataFormat->numChannels = channels; - ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = ma_round_to_standard_sample_rate__opensl(sampleRate * 1000); /* In millihertz. Annoyingly, the sample rate variable is named differently between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM */ - pDataFormat->bitsPerSample = ma_get_bytes_per_sample(format)*8; - pDataFormat->channelMask = ma_channel_map_to_channel_mask__opensl(channelMap, channels); - pDataFormat->endianness = (ma_is_little_endian()) ? 
SL_BYTEORDER_LITTLEENDIAN : SL_BYTEORDER_BIGENDIAN; + return MA_SUCCESS; +} - /* - Android has a few restrictions on the format as documented here: https://developer.android.com/ndk/guides/audio/opensl-for-android.html - - Only mono and stereo is supported. - - Only u8 and s16 formats are supported. - - Maximum sample rate of 48000. - */ -#ifdef MA_ANDROID - if (pDataFormat->numChannels > 2) { - pDataFormat->numChannels = 2; - } -#if __ANDROID_API__ >= 21 - if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { - /* It's floating point. */ - ma_assert(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); - if (pDataFormat->bitsPerSample > 32) { - pDataFormat->bitsPerSample = 32; +static ma_result ma_context_init__opensl(const ma_context_config* pConfig, ma_context* pContext) +{ + MA_ASSERT(pContext != NULL); + + (void)pConfig; + + /* Initialize global data first if applicable. */ + if (ma_atomic_increment_32(&g_maOpenSLInitCounter) == 1) { + SLresult resultSL = slCreateEngine(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL); + if (resultSL != SL_RESULT_SUCCESS) { + ma_atomic_decrement_32(&g_maOpenSLInitCounter); + return MA_NO_BACKEND; } - } else { - if (pDataFormat->bitsPerSample > 16) { - pDataFormat->bitsPerSample = 16; + + (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE); + + resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_ENGINE, &g_maEngineSL); + if (resultSL != SL_RESULT_SUCCESS) { + (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); + ma_atomic_decrement_32(&g_maOpenSLInitCounter); + return MA_NO_BACKEND; } } -#else - if (pDataFormat->bitsPerSample > 16) { - pDataFormat->bitsPerSample = 16; - } -#endif - if (((SLDataFormat_PCM*)pDataFormat)->samplesPerSec > SL_SAMPLINGRATE_48) { - ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec = SL_SAMPLINGRATE_48; - } -#endif - pDataFormat->containerSize = pDataFormat->bitsPerSample; /* Always tightly packed for now. */ + pContext->isBackendAsynchronous = MA_TRUE; + + pContext->onUninit = ma_context_uninit__opensl; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__opensl; + pContext->onEnumDevices = ma_context_enumerate_devices__opensl; + pContext->onGetDeviceInfo = ma_context_get_device_info__opensl; + pContext->onDeviceInit = ma_device_init__opensl; + pContext->onDeviceUninit = ma_device_uninit__opensl; + pContext->onDeviceStart = ma_device_start__opensl; + pContext->onDeviceStop = ma_device_stop__opensl; return MA_SUCCESS; } +#endif /* OpenSL|ES */ + + +/****************************************************************************** + +Web Audio Backend + +******************************************************************************/ +#ifdef MA_HAS_WEBAUDIO +#include -ma_result ma_deconstruct_SLDataFormat_PCM__opensl(ma_SLDataFormat_PCM* pDataFormat, ma_format* pFormat, ma_uint32* pChannels, ma_uint32* pSampleRate, ma_channel* pChannelMap) +static ma_bool32 ma_is_capture_supported__webaudio() { - ma_bool32 isFloatingPoint = MA_FALSE; -#if defined(MA_ANDROID) && __ANDROID_API__ >= 21 - if (pDataFormat->formatType == SL_ANDROID_DATAFORMAT_PCM_EX) { - ma_assert(pDataFormat->representation == SL_ANDROID_PCM_REPRESENTATION_FLOAT); - isFloatingPoint = MA_TRUE; - } + return EM_ASM_INT({ + return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined); + }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility. 
*/ +} + +#ifdef __cplusplus +extern "C" { #endif - if (isFloatingPoint) { - if (pDataFormat->bitsPerSample == 32) { - *pFormat = ma_format_f32; - } +EMSCRIPTEN_KEEPALIVE void ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames) +{ + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_capture(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB); } else { - if (pDataFormat->bitsPerSample == 8) { - *pFormat = ma_format_u8; - } else if (pDataFormat->bitsPerSample == 16) { - *pFormat = ma_format_s16; - } else if (pDataFormat->bitsPerSample == 24) { - *pFormat = ma_format_s24; - } else if (pDataFormat->bitsPerSample == 32) { - *pFormat = ma_format_s32; - } + ma_device__send_frames_to_client(pDevice, (ma_uint32)frameCount, pFrames); /* Send directly to the client. */ } +} - *pChannels = pDataFormat->numChannels; - *pSampleRate = ((SLDataFormat_PCM*)pDataFormat)->samplesPerSec / 1000; - ma_channel_mask_to_channel_map__opensl(pDataFormat->channelMask, pDataFormat->numChannels, pChannelMap); +EMSCRIPTEN_KEEPALIVE void ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames) +{ + if (pDevice->type == ma_device_type_duplex) { + ma_device__handle_duplex_callback_playback(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB); + } else { + ma_device__read_frames_from_client(pDevice, (ma_uint32)frameCount, pFrames); /* Read directly from the device. */ + } +} +#ifdef __cplusplus +} +#endif + +static ma_bool32 ma_context_is_device_id_equal__webaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) +{ + MA_ASSERT(pContext != NULL); + MA_ASSERT(pID0 != NULL); + MA_ASSERT(pID1 != NULL); + (void)pContext; - return MA_SUCCESS; + return ma_strcmp(pID0->webaudio, pID1->webaudio) == 0; } -ma_result ma_device_init__opensl(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +static ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) { -#ifdef MA_ANDROID - SLDataLocator_AndroidSimpleBufferQueue queue; - SLresult resultSL; - ma_uint32 bufferSizeInFrames; - size_t bufferSizeInBytes; - const SLInterfaceID itfIDs1[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE}; - const SLboolean itfIDsRequired1[] = {SL_BOOLEAN_TRUE}; -#endif + ma_bool32 cbResult = MA_TRUE; - (void)pContext; + MA_ASSERT(pContext != NULL); + MA_ASSERT(callback != NULL); - ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to initialize a new device. */ - if (g_maOpenSLInitCounter == 0) { - return MA_INVALID_OPERATION; - } + /* Only supporting default devices for now. */ - if (pConfig->deviceType == ma_device_type_loopback) { - return MA_DEVICE_TYPE_NOT_SUPPORTED; + /* Playback. */ + if (cbResult) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); } - /* - For now, only supporting Android implementations of OpenSL|ES since that's the only one I've - been able to test with and I currently depend on Android-specific extensions (simple buffer - queues). - */ -#ifdef MA_ANDROID - /* No exclusive mode with OpenSL|ES. 
*/ - if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || - ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { - return MA_SHARE_MODE_NOT_SUPPORTED; + /* Capture. */ + if (cbResult) { + if (ma_is_capture_supported__webaudio()) { + ma_device_info deviceInfo; + MA_ZERO_OBJECT(&deviceInfo); + ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); + } } - /* Now we can start initializing the device properly. */ - ma_assert(pDevice != NULL); - ma_zero_object(&pDevice->opensl); - - queue.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE; - queue.numBuffers = pConfig->periods; + return MA_SUCCESS; +} +static ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + MA_ASSERT(pContext != NULL); - if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { - ma_SLDataFormat_PCM pcm; - SLDataLocator_IODevice locatorDevice; - SLDataSource source; - SLDataSink sink; + /* No exclusive mode with Web Audio. */ + if (shareMode == ma_share_mode_exclusive) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } - ma_SLDataFormat_PCM_init__opensl(pConfig->capture.format, pConfig->capture.channels, pConfig->sampleRate, pConfig->capture.channelMap, &pcm); + if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { + return MA_NO_DEVICE; + } - locatorDevice.locatorType = SL_DATALOCATOR_IODEVICE; - locatorDevice.deviceType = SL_IODEVICE_AUDIOINPUT; - locatorDevice.deviceID = (pConfig->capture.pDeviceID == NULL) ? SL_DEFAULTDEVICEID_AUDIOINPUT : pConfig->capture.pDeviceID->opensl; - locatorDevice.device = NULL; - source.pLocator = &locatorDevice; - source.pFormat = NULL; + MA_ZERO_MEMORY(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio)); - sink.pLocator = &queue; - sink.pFormat = (SLDataFormat_PCM*)&pcm; + /* Only supporting default devices for now. */ + if (deviceType == ma_device_type_playback) { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); + } else { + ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); + } - resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); - if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) { - /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ - pcm.formatType = SL_DATAFORMAT_PCM; - pcm.numChannels = 1; - ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; /* The name of the sample rate variable is different between SLAndroidDataFormat_PCM_EX and SLDataFormat_PCM. */ - pcm.bitsPerSample = 16; - pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. */ - pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; - resultSL = (*g_maEngineSL)->CreateAudioRecorder(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioRecorderObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); - } + /* Web Audio can support any number of channels and sample rates. 
It only supports f32 formats, however. */ + pDeviceInfo->minChannels = 1; + pDeviceInfo->maxChannels = MA_MAX_CHANNELS; + if (pDeviceInfo->maxChannels > 32) { + pDeviceInfo->maxChannels = 32; /* Maximum output channel count is 32 for createScriptProcessor() (JavaScript). */ + } - if (resultSL != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio recorder.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + /* We can query the sample rate by just using a temporary audio context. */ + pDeviceInfo->minSampleRate = EM_ASM_INT({ + try { + var temp = new (window.AudioContext || window.webkitAudioContext)(); + var sampleRate = temp.sampleRate; + temp.close(); + return sampleRate; + } catch(e) { + return 0; } + }, 0); /* Must pass in a dummy argument for C99 compatibility. */ + pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; + if (pDeviceInfo->minSampleRate == 0) { + return MA_NO_DEVICE; + } - if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Realize((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_BOOLEAN_FALSE) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio recorder.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + /* Web Audio only supports f32. */ + pDeviceInfo->formatCount = 1; + pDeviceInfo->formats[0] = ma_format_f32; - if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_RECORD, &pDevice->opensl.pAudioRecorder) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_RECORD interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + return MA_SUCCESS; +} - if (MA_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioRecorderObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueueCapture) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } - if (MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, ma_buffer_queue_callback_capture__opensl_android, pDevice) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } +static void ma_device_uninit_by_index__webaudio(ma_device* pDevice, ma_device_type deviceType, int deviceIndex) +{ + MA_ASSERT(pDevice != NULL); - /* The internal format is determined by the "pcm" object. */ - ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->capture.internalFormat, &pDevice->capture.internalChannels, &pDevice->capture.internalSampleRate, pDevice->capture.internalChannelMap); + EM_ASM({ + var device = miniaudio.get_device_by_index($0); - /* Buffer. */ - bufferSizeInFrames = pConfig->bufferSizeInFrames; - if (bufferSizeInFrames == 0) { - bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pDevice->capture.internalSampleRate); + /* Make sure all nodes are disconnected and marked for collection. 
*/ + if (device.scriptNode !== undefined) { + device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */ + device.scriptNode.disconnect(); + device.scriptNode = undefined; + } + if (device.streamNode !== undefined) { + device.streamNode.disconnect(); + device.streamNode = undefined; } - pDevice->capture.internalPeriods = pConfig->periods; - pDevice->capture.internalBufferSizeInFrames = (bufferSizeInFrames / pDevice->capture.internalPeriods) * pDevice->capture.internalPeriods; - pDevice->opensl.currentBufferIndexCapture = 0; - bufferSizeInBytes = pDevice->capture.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - pDevice->opensl.pBufferCapture = (ma_uint8*)ma_malloc(bufferSizeInBytes); - if (pDevice->opensl.pBufferCapture == NULL) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY); + /* + Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want + to clear the callback before closing. + */ + device.webaudio.close(); + device.webaudio = undefined; + + /* Can't forget to free the intermediary buffer. This is the buffer that's shared between JavaScript and C. */ + if (device.intermediaryBuffer !== undefined) { + Module._free(device.intermediaryBuffer); + device.intermediaryBuffer = undefined; + device.intermediaryBufferView = undefined; + device.intermediaryBufferSizeInBytes = undefined; } - MA_ZERO_MEMORY(pDevice->opensl.pBufferCapture, bufferSizeInBytes); - } - if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { - ma_SLDataFormat_PCM pcm; - SLDataSource source; - SLDataLocator_OutputMix outmixLocator; - SLDataSink sink; + /* Make sure the device is untracked so the slot can be reused later. 
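+            On the C side a device is referenced purely by the integer slot returned from miniaudio.track_device()
+            (stored in pDevice->webaudio.indexCapture / indexPlayback), so once the slot is cleared here it can be
+            handed out again by the next call to track_device().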
*/ + miniaudio.untrack_device_by_index($0); + }, deviceIndex, deviceType); +} - ma_SLDataFormat_PCM_init__opensl(pConfig->playback.format, pConfig->playback.channels, pConfig->sampleRate, pConfig->playback.channelMap, &pcm); +static void ma_device_uninit__webaudio(ma_device* pDevice) +{ + MA_ASSERT(pDevice != NULL); - resultSL = (*g_maEngineSL)->CreateOutputMix(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pOutputMixObj, 0, NULL, NULL); - if (resultSL != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create output mix.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); + } - if (MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Realize((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_BOOLEAN_FALSE)) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize output mix object.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback); + } - if (MA_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->GetInterface((SLObjectItf)pDevice->opensl.pOutputMixObj, SL_IID_OUTPUTMIX, &pDevice->opensl.pOutputMix) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_OUTPUTMIX interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + if (pDevice->type == ma_device_type_duplex) { + ma_pcm_rb_uninit(&pDevice->webaudio.duplexRB); + } +} - /* Set the output device. */ - if (pConfig->playback.pDeviceID != NULL) { - SLuint32 deviceID_OpenSL = pConfig->playback.pDeviceID->opensl; - MA_OPENSL_OUTPUTMIX(pDevice->opensl.pOutputMix)->ReRoute((SLOutputMixItf)pDevice->opensl.pOutputMix, 1, &deviceID_OpenSL); - } - - source.pLocator = &queue; - source.pFormat = (SLDataFormat_PCM*)&pcm; +static ma_result ma_device_init_by_type__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) +{ + int deviceIndex; + ma_uint32 internalBufferSizeInFrames; - outmixLocator.locatorType = SL_DATALOCATOR_OUTPUTMIX; - outmixLocator.outputMix = (SLObjectItf)pDevice->opensl.pOutputMixObj; + MA_ASSERT(pContext != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(deviceType != ma_device_type_duplex); + MA_ASSERT(pDevice != NULL); - sink.pLocator = &outmixLocator; - sink.pFormat = NULL; + if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { + return MA_NO_DEVICE; + } - resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); - if (resultSL == SL_RESULT_CONTENT_UNSUPPORTED) { - /* Unsupported format. Fall back to something safer and try again. If this fails, just abort. */ - pcm.formatType = SL_DATAFORMAT_PCM; - pcm.numChannels = 2; - ((SLDataFormat_PCM*)&pcm)->samplesPerSec = SL_SAMPLINGRATE_16; - pcm.bitsPerSample = 16; - pcm.containerSize = pcm.bitsPerSample; /* Always tightly packed for now. 
*/ - pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; - resultSL = (*g_maEngineSL)->CreateAudioPlayer(g_maEngineSL, (SLObjectItf*)&pDevice->opensl.pAudioPlayerObj, &source, &sink, 1, itfIDs1, itfIDsRequired1); - } + /* Try calculating an appropriate buffer size. */ + internalBufferSizeInFrames = pConfig->bufferSizeInFrames; + if (internalBufferSizeInFrames == 0) { + internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pConfig->sampleRate); + } - if (resultSL != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to create audio player.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + /* The size of the buffer must be a power of 2 and between 256 and 16384. */ + if (internalBufferSizeInFrames < 256) { + internalBufferSizeInFrames = 256; + } else if (internalBufferSizeInFrames > 16384) { + internalBufferSizeInFrames = 16384; + } else { + internalBufferSizeInFrames = ma_next_power_of_2(internalBufferSizeInFrames); + } - if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Realize((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_BOOLEAN_FALSE) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to realize audio player.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */ + deviceIndex = EM_ASM_INT({ + var channels = $0; + var sampleRate = $1; + var bufferSize = $2; /* In PCM frames. */ + var isCapture = $3; + var pDevice = $4; - if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_PLAY, &pDevice->opensl.pAudioPlayer) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_PLAY interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); + if (typeof(miniaudio) === 'undefined') { + return -1; /* Context not initialized. */ } - if (MA_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->GetInterface((SLObjectItf)pDevice->opensl.pAudioPlayerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &pDevice->opensl.pBufferQueuePlayback) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to retrieve SL_IID_ANDROIDSIMPLEBUFFERQUEUE interface.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + var device = {}; - if (MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->RegisterCallback((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, ma_buffer_queue_callback_playback__opensl_android, pDevice) != SL_RESULT_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to register buffer queue callback.", MA_FAILED_TO_OPEN_BACKEND_DEVICE); - } + /* The AudioContext must be created in a suspended state. */ + device.webaudio = new (window.AudioContext || window.webkitAudioContext)({sampleRate:sampleRate}); + device.webaudio.suspend(); - /* The internal format is determined by the "pcm" object. 
*/ - ma_deconstruct_SLDataFormat_PCM__opensl(&pcm, &pDevice->playback.internalFormat, &pDevice->playback.internalChannels, &pDevice->playback.internalSampleRate, pDevice->playback.internalChannelMap); + /* + We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. Because it's passed between + JavaScript and C it needs to be allocated and freed using Module._malloc() and Module._free(). + */ + device.intermediaryBufferSizeInBytes = channels * bufferSize * 4; + device.intermediaryBuffer = Module._malloc(device.intermediaryBufferSizeInBytes); + device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes); - /* Buffer. */ - bufferSizeInFrames = pConfig->bufferSizeInFrames; - if (bufferSizeInFrames == 0) { - bufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pDevice->playback.internalSampleRate); - } - pDevice->playback.internalPeriods = pConfig->periods; - pDevice->playback.internalBufferSizeInFrames = (bufferSizeInFrames / pDevice->playback.internalPeriods) * pDevice->playback.internalPeriods; - pDevice->opensl.currentBufferIndexPlayback = 0; + /* + Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations. - bufferSizeInBytes = pDevice->playback.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - pDevice->opensl.pBufferPlayback = (ma_uint8*)ma_malloc(bufferSizeInBytes); - if (pDevice->opensl.pBufferPlayback == NULL) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to allocate memory for data buffer.", MA_OUT_OF_MEMORY); - } - MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, bufferSizeInBytes); - } + ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. You just set a callback + that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of + something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to + work for years to come, but this may need to change to use AudioSourceBufferNode instead, which I think is what Emscripten uses for it's built-in SDL + implementation. I'll be avoiding that insane AudioWorklet API like the plague... - if (pConfig->deviceType == ma_device_type_duplex) { - ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames); - ma_result result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->opensl.duplexRB); - if (result != MA_SUCCESS) { - ma_device_uninit__opensl(pDevice); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to initialize ring buffer.", result); - } + For capture it is a bit unintuitive. We use the ScriptProccessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the + playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the + MediaRecorder API, but unfortunately you need to specify a MIME time (Opus, Vorbis, etc.) 
for the binary blob that's returned to the client, but I've + been unable to figure out how to get this as raw PCM. The closes I can think is to use the MIME type for WAV files and just parse it, but I don't know + how well this would work. Although ScriptProccessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it like + this for now. If anything knows how I could get raw PCM data using the MediaRecorder API please let me know! + */ + device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channels, channels); - /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */ - { - ma_uint32 marginSizeInFrames = rbSizeInFrames / pDevice->capture.internalPeriods; - void* pMarginData; - ma_pcm_rb_acquire_write(&pDevice->opensl.duplexRB, &marginSizeInFrames, &pMarginData); - { - ma_zero_memory(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); - } - ma_pcm_rb_commit_write(&pDevice->opensl.duplexRB, marginSizeInFrames, pMarginData); - } - } + if (isCapture) { + device.scriptNode.onaudioprocess = function(e) { + if (device.intermediaryBuffer === undefined) { + return; /* This means the device has been uninitialized. */ + } - return MA_SUCCESS; -#else - return MA_NO_BACKEND; /* Non-Android implementations are not supported. */ -#endif -} + /* Make sure silence it output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */ + for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { + e.outputBuffer.getChannelData(iChannel).fill(0.0); + } -ma_result ma_device_start__opensl(ma_device* pDevice) -{ - SLresult resultSL; - size_t periodSizeInBytes; - ma_uint32 iPeriod; + /* There are some situations where we may want to send silence to the client. */ + var sendSilence = false; + if (device.streamNode === undefined) { + sendSilence = true; + } - ma_assert(pDevice != NULL); + /* Sanity check. This will never happen, right? */ + if (e.inputBuffer.numberOfChannels != channels) { + console.log("Capture: Channel count mismatch. " + e.inputBufer.numberOfChannels + " != " + channels + ". Sending silence."); + sendSilence = true; + } - ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it and then attempted to start the device. */ - if (g_maOpenSLInitCounter == 0) { - return MA_INVALID_OPERATION; - } + /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */ + var totalFramesProcessed = 0; + while (totalFramesProcessed < e.inputBuffer.length) { + var framesRemaining = e.inputBuffer.length - totalFramesProcessed; + var framesToProcess = framesRemaining; + if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) { + framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4); + } - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_RECORDING); - if (resultSL != SL_RESULT_SUCCESS) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal capture device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } + /* We need to do the reverse of the playback case. 
We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. */ + if (sendSilence) { + device.intermediaryBufferView.fill(0.0); + } else { + for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) { + for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) { + device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame]; + } + } + } - periodSizeInBytes = (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods) * ma_get_bytes_per_frame(pDevice->capture.internalFormat, pDevice->capture.internalChannels); - for (iPeriod = 0; iPeriod < pDevice->capture.internalPeriods; ++iPeriod) { - resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture, pDevice->opensl.pBufferCapture + (periodSizeInBytes * iPeriod), periodSizeInBytes); - if (resultSL != SL_RESULT_SUCCESS) { - MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for capture device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } - } - } + /* Send data to the client from our intermediary buffer. */ + ccall("ma_device_process_pcm_frames_capture__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]); - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_PLAYING); - if (resultSL != SL_RESULT_SUCCESS) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to start internal playback device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } + totalFramesProcessed += framesToProcess; + } + }; - /* In playback mode (no duplex) we need to load some initial buffers. In duplex mode we need to enqueu silent buffers. */ - if (pDevice->type == ma_device_type_duplex) { - MA_ZERO_MEMORY(pDevice->opensl.pBufferPlayback, pDevice->playback.internalBufferSizeInFrames * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels)); + navigator.mediaDevices.getUserMedia({audio:true, video:false}) + .then(function(stream) { + device.streamNode = device.webaudio.createMediaStreamSource(stream); + device.streamNode.connect(device.scriptNode); + device.scriptNode.connect(device.webaudio.destination); + }) + .catch(function(error) { + /* I think this should output silence... */ + device.scriptNode.connect(device.webaudio.destination); + }); } else { - ma_device__read_frames_from_client(pDevice, pDevice->playback.internalBufferSizeInFrames, pDevice->opensl.pBufferPlayback); - } + device.scriptNode.onaudioprocess = function(e) { + if (device.intermediaryBuffer === undefined) { + return; /* This means the device has been uninitialized. 
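+                       (ma_device_uninit_by_index__webaudio() frees the intermediary buffer and closes the AudioContext,
+                       but a callback that was already queued may still fire once after that, so treat this check as the
+                       last line of defence.)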
*/ + } - periodSizeInBytes = (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods) * ma_get_bytes_per_frame(pDevice->playback.internalFormat, pDevice->playback.internalChannels); - for (iPeriod = 0; iPeriod < pDevice->playback.internalPeriods; ++iPeriod) { - resultSL = MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Enqueue((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback, pDevice->opensl.pBufferPlayback + (periodSizeInBytes * iPeriod), periodSizeInBytes); - if (resultSL != SL_RESULT_SUCCESS) { - MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to enqueue buffer for playback device.", MA_FAILED_TO_START_BACKEND_DEVICE); - } - } - } + var outputSilence = false; - return MA_SUCCESS; -} + /* Sanity check. This will never happen, right? */ + if (e.outputBuffer.numberOfChannels != channels) { + console.log("Playback: Channel count mismatch. " + e.outputBufer.numberOfChannels + " != " + channels + ". Outputting silence."); + outputSilence = true; + return; + } -ma_result ma_device_stop__opensl(ma_device* pDevice) -{ - SLresult resultSL; - ma_stop_proc onStop; + /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */ + var totalFramesProcessed = 0; + while (totalFramesProcessed < e.outputBuffer.length) { + var framesRemaining = e.outputBuffer.length - totalFramesProcessed; + var framesToProcess = framesRemaining; + if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) { + framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4); + } - ma_assert(pDevice != NULL); + /* Read data from the client into our intermediary buffer. */ + ccall("ma_device_process_pcm_frames_playback__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]); - ma_assert(g_maOpenSLInitCounter > 0); /* <-- If you trigger this it means you've either not initialized the context, or you've uninitialized it before stopping/uninitializing the device. */ - if (g_maOpenSLInitCounter == 0) { - return MA_INVALID_OPERATION; - } + /* At this point we'll have data in our intermediary buffer which we now need to deinterleave and copy over to the output buffers. */ + if (outputSilence) { + for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { + e.outputBuffer.getChannelData(iChannel).fill(0.0); + } + } else { + for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { + for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) { + e.outputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame] = device.intermediaryBufferView[iFrame*channels + iChannel]; + } + } + } - /* TODO: Wait until all buffers have been processed. Hint: Maybe SLAndroidSimpleBufferQueue::GetState() could be used in a loop? 
*/ + totalFramesProcessed += framesToProcess; + } + }; - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - resultSL = MA_OPENSL_RECORD(pDevice->opensl.pAudioRecorder)->SetRecordState((SLRecordItf)pDevice->opensl.pAudioRecorder, SL_RECORDSTATE_STOPPED); - if (resultSL != SL_RESULT_SUCCESS) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal capture device.", MA_FAILED_TO_STOP_BACKEND_DEVICE); + device.scriptNode.connect(device.webaudio.destination); } - MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueueCapture)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueueCapture); - } - - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - resultSL = MA_OPENSL_PLAY(pDevice->opensl.pAudioPlayer)->SetPlayState((SLPlayItf)pDevice->opensl.pAudioPlayer, SL_PLAYSTATE_STOPPED); - if (resultSL != SL_RESULT_SUCCESS) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "[OpenSL] Failed to stop internal playback device.", MA_FAILED_TO_STOP_BACKEND_DEVICE); - } + return miniaudio.track_device(device); + }, (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels, pConfig->sampleRate, internalBufferSizeInFrames, deviceType == ma_device_type_capture, pDevice); - MA_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueuePlayback)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueuePlayback); + if (deviceIndex < 0) { + return MA_FAILED_TO_OPEN_BACKEND_DEVICE; } - /* Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. */ - onStop = pDevice->onStop; - if (onStop) { - onStop(pDevice); + if (deviceType == ma_device_type_capture) { + pDevice->webaudio.indexCapture = deviceIndex; + pDevice->capture.internalFormat = ma_format_f32; + pDevice->capture.internalChannels = pConfig->capture.channels; + ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); + pDevice->capture.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); + pDevice->capture.internalBufferSizeInFrames = internalBufferSizeInFrames; + pDevice->capture.internalPeriods = 1; + } else { + pDevice->webaudio.indexPlayback = deviceIndex; + pDevice->playback.internalFormat = ma_format_f32; + pDevice->playback.internalChannels = pConfig->playback.channels; + ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); + pDevice->playback.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); + pDevice->playback.internalBufferSizeInFrames = internalBufferSizeInFrames; + pDevice->playback.internalPeriods = 1; } return MA_SUCCESS; } - -ma_result ma_context_uninit__opensl(ma_context* pContext) +static ma_result ma_device_init__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_opensl); - (void)pContext; + ma_result result; - /* Uninit global data. 
*/ - if (g_maOpenSLInitCounter > 0) { - if (ma_atomic_decrement_32(&g_maOpenSLInitCounter) == 0) { - (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); - } + if (pConfig->deviceType == ma_device_type_loopback) { + return MA_DEVICE_TYPE_NOT_SUPPORTED; } - return MA_SUCCESS; -} - -ma_result ma_context_init__opensl(const ma_context_config* pConfig, ma_context* pContext) -{ - ma_assert(pContext != NULL); - - (void)pConfig; + /* No exclusive mode with Web Audio. */ + if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || + ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { + return MA_SHARE_MODE_NOT_SUPPORTED; + } - /* Initialize global data first if applicable. */ - if (ma_atomic_increment_32(&g_maOpenSLInitCounter) == 1) { - SLresult resultSL = slCreateEngine(&g_maEngineObjectSL, 0, NULL, 0, NULL, NULL); - if (resultSL != SL_RESULT_SUCCESS) { - ma_atomic_decrement_32(&g_maOpenSLInitCounter); - return MA_NO_BACKEND; + if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_capture, pDevice); + if (result != MA_SUCCESS) { + return result; } + } - (*g_maEngineObjectSL)->Realize(g_maEngineObjectSL, SL_BOOLEAN_FALSE); - - resultSL = (*g_maEngineObjectSL)->GetInterface(g_maEngineObjectSL, SL_IID_ENGINE, &g_maEngineSL); - if (resultSL != SL_RESULT_SUCCESS) { - (*g_maEngineObjectSL)->Destroy(g_maEngineObjectSL); - ma_atomic_decrement_32(&g_maOpenSLInitCounter); - return MA_NO_BACKEND; + if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { + result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_playback, pDevice); + if (result != MA_SUCCESS) { + if (pConfig->deviceType == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); + } + return result; } } - pContext->isBackendAsynchronous = MA_TRUE; + /* + We need a ring buffer for moving data from the capture device to the playback device. The capture callback is the producer + and the playback callback is the consumer. The buffer needs to be large enough to hold internalBufferSizeInFrames based on + the external sample rate. 
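+    As a rough worked example (figures are illustrative only): with an external rate of 48000 Hz, an internal capture
+    rate of 44100 Hz and an internal buffer of 1024 frames, ma_calculate_frame_count_after_src(48000, 44100, 1024)
+    comes out at roughly 1024 * 48000 / 44100 ~= 1115 frames, which the code below then doubles to leave headroom for
+    the producer (capture callback) and consumer (playback callback) drifting apart.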
+ */ + if (pConfig->deviceType == ma_device_type_duplex) { + ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames) * 2; + result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->pContext->allocationCallbacks, &pDevice->webaudio.duplexRB); + if (result != MA_SUCCESS) { + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback); + } + return result; + } - pContext->onUninit = ma_context_uninit__opensl; - pContext->onDeviceIDEqual = ma_context_is_device_id_equal__opensl; - pContext->onEnumDevices = ma_context_enumerate_devices__opensl; - pContext->onGetDeviceInfo = ma_context_get_device_info__opensl; - pContext->onDeviceInit = ma_device_init__opensl; - pContext->onDeviceUninit = ma_device_uninit__opensl; - pContext->onDeviceStart = ma_device_start__opensl; - pContext->onDeviceStop = ma_device_stop__opensl; + /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */ + { + ma_uint32 marginSizeInFrames = rbSizeInFrames / 3; /* <-- Dividing by 3 because internalPeriods is always set to 1 for WebAudio. */ + void* pMarginData; + ma_pcm_rb_acquire_write(&pDevice->webaudio.duplexRB, &marginSizeInFrames, &pMarginData); + { + MA_ZERO_MEMORY(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); + } + ma_pcm_rb_commit_write(&pDevice->webaudio.duplexRB, marginSizeInFrames, pMarginData); + } + } return MA_SUCCESS; } -#endif /* OpenSL|ES */ - - -/****************************************************************************** - -Web Audio Backend - -******************************************************************************/ -#ifdef MA_HAS_WEBAUDIO -#include -ma_bool32 ma_is_capture_supported__webaudio() +static ma_result ma_device_start__webaudio(ma_device* pDevice) { - return EM_ASM_INT({ - return (navigator.mediaDevices !== undefined && navigator.mediaDevices.getUserMedia !== undefined); - }, 0) != 0; /* Must pass in a dummy argument for C99 compatibility. */ -} + MA_ASSERT(pDevice != NULL); -#ifdef __cplusplus -extern "C" { -#endif -EMSCRIPTEN_KEEPALIVE void ma_device_process_pcm_frames_capture__webaudio(ma_device* pDevice, int frameCount, float* pFrames) -{ - if (pDevice->type == ma_device_type_duplex) { - ma_device__handle_duplex_callback_capture(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB); - } else { - ma_device__send_frames_to_client(pDevice, (ma_uint32)frameCount, pFrames); /* Send directly to the client. 
*/ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + EM_ASM({ + miniaudio.get_device_by_index($0).webaudio.resume(); + }, pDevice->webaudio.indexCapture); } -} -EMSCRIPTEN_KEEPALIVE void ma_device_process_pcm_frames_playback__webaudio(ma_device* pDevice, int frameCount, float* pFrames) -{ - if (pDevice->type == ma_device_type_duplex) { - ma_device__handle_duplex_callback_playback(pDevice, (ma_uint32)frameCount, pFrames, &pDevice->webaudio.duplexRB); - } else { - ma_device__read_frames_from_client(pDevice, (ma_uint32)frameCount, pFrames); /* Read directly from the device. */ + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + EM_ASM({ + miniaudio.get_device_by_index($0).webaudio.resume(); + }, pDevice->webaudio.indexPlayback); } -} -#ifdef __cplusplus -} -#endif - -ma_bool32 ma_context_is_device_id_equal__webaudio(ma_context* pContext, const ma_device_id* pID0, const ma_device_id* pID1) -{ - ma_assert(pContext != NULL); - ma_assert(pID0 != NULL); - ma_assert(pID1 != NULL); - (void)pContext; - return ma_strcmp(pID0->webaudio, pID1->webaudio) == 0; + return MA_SUCCESS; } -ma_result ma_context_enumerate_devices__webaudio(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +static ma_result ma_device_stop__webaudio(ma_device* pDevice) { - ma_bool32 cbResult = MA_TRUE; - - ma_assert(pContext != NULL); - ma_assert(callback != NULL); + MA_ASSERT(pDevice != NULL); - /* Only supporting default devices for now. */ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + EM_ASM({ + miniaudio.get_device_by_index($0).webaudio.suspend(); + }, pDevice->webaudio.indexCapture); + } - /* Playback. */ - if (cbResult) { - ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); - ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); - cbResult = callback(pContext, ma_device_type_playback, &deviceInfo, pUserData); + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + EM_ASM({ + miniaudio.get_device_by_index($0).webaudio.suspend(); + }, pDevice->webaudio.indexPlayback); } - /* Capture. */ - if (cbResult) { - if (ma_is_capture_supported__webaudio()) { - ma_device_info deviceInfo; - ma_zero_object(&deviceInfo); - ma_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); - cbResult = callback(pContext, ma_device_type_capture, &deviceInfo, pUserData); - } + ma_stop_proc onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); } return MA_SUCCESS; } -ma_result ma_context_get_device_info__webaudio(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +static ma_result ma_context_uninit__webaudio(ma_context* pContext) { - ma_assert(pContext != NULL); - - /* No exclusive mode with Web Audio. */ - if (shareMode == ma_share_mode_exclusive) { - return MA_SHARE_MODE_NOT_SUPPORTED; - } + MA_ASSERT(pContext != NULL); + MA_ASSERT(pContext->backend == ma_backend_webaudio); - if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { - return MA_NO_DEVICE; - } + /* Nothing needs to be done here. 
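+    Note that the global `miniaudio` JavaScript object created by ma_context_init__webaudio() is left alive; it is only
+    created once (the typeof check below guards against re-creation) and can be reused by contexts initialized later.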
*/ + (void)pContext; + return MA_SUCCESS; +} - ma_zero_memory(pDeviceInfo->id.webaudio, sizeof(pDeviceInfo->id.webaudio)); +static ma_result ma_context_init__webaudio(const ma_context_config* pConfig, ma_context* pContext) +{ + int resultFromJS; - /* Only supporting default devices for now. */ - if (deviceType == ma_device_type_playback) { - ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); - } else { - ma_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MA_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); - } + MA_ASSERT(pContext != NULL); - /* Web Audio can support any number of channels and sample rates. It only supports f32 formats, however. */ - pDeviceInfo->minChannels = 1; - pDeviceInfo->maxChannels = MA_MAX_CHANNELS; - if (pDeviceInfo->maxChannels > 32) { - pDeviceInfo->maxChannels = 32; /* Maximum output channel count is 32 for createScriptProcessor() (JavaScript). */ - } + /* Here is where our global JavaScript object is initialized. */ + resultFromJS = EM_ASM_INT({ + if ((window.AudioContext || window.webkitAudioContext) === undefined) { + return 0; /* Web Audio not supported. */ + } - /* We can query the sample rate by just using a temporary audio context. */ - pDeviceInfo->minSampleRate = EM_ASM_INT({ - try { - var temp = new (window.AudioContext || window.webkitAudioContext)(); - var sampleRate = temp.sampleRate; - temp.close(); - return sampleRate; - } catch(e) { - return 0; + if (typeof(miniaudio) === 'undefined') { + miniaudio = {}; + miniaudio.devices = []; /* Device cache for mapping devices to indexes for JavaScript/C interop. */ + + miniaudio.track_device = function(device) { + /* Try inserting into a free slot first. */ + for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { + if (miniaudio.devices[iDevice] == null) { + miniaudio.devices[iDevice] = device; + return iDevice; + } + } + + /* Getting here means there is no empty slots in the array so we just push to the end. */ + miniaudio.devices.push(device); + return miniaudio.devices.length - 1; + }; + + miniaudio.untrack_device_by_index = function(deviceIndex) { + /* We just set the device's slot to null. The slot will get reused in the next call to ma_track_device. */ + miniaudio.devices[deviceIndex] = null; + + /* Trim the array if possible. */ + while (miniaudio.devices.length > 0) { + if (miniaudio.devices[miniaudio.devices.length-1] == null) { + miniaudio.devices.pop(); + } else { + break; + } + } + }; + + miniaudio.untrack_device = function(device) { + for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { + if (miniaudio.devices[iDevice] == device) { + return miniaudio.untrack_device_by_index(iDevice); + } + } + }; + + miniaudio.get_device_by_index = function(deviceIndex) { + return miniaudio.devices[deviceIndex]; + }; } + + return 1; }, 0); /* Must pass in a dummy argument for C99 compatibility. */ - pDeviceInfo->maxSampleRate = pDeviceInfo->minSampleRate; - if (pDeviceInfo->minSampleRate == 0) { - return MA_NO_DEVICE; + + if (resultFromJS != 1) { + return MA_FAILED_TO_INIT_BACKEND; } - /* Web Audio only supports f32. 
*/ - pDeviceInfo->formatCount = 1; - pDeviceInfo->formats[0] = ma_format_f32; + pContext->isBackendAsynchronous = MA_TRUE; + + pContext->onUninit = ma_context_uninit__webaudio; + pContext->onDeviceIDEqual = ma_context_is_device_id_equal__webaudio; + pContext->onEnumDevices = ma_context_enumerate_devices__webaudio; + pContext->onGetDeviceInfo = ma_context_get_device_info__webaudio; + pContext->onDeviceInit = ma_device_init__webaudio; + pContext->onDeviceUninit = ma_device_uninit__webaudio; + pContext->onDeviceStart = ma_device_start__webaudio; + pContext->onDeviceStop = ma_device_stop__webaudio; + + (void)pConfig; /* Unused. */ return MA_SUCCESS; } +#endif /* Web Audio */ -void ma_device_uninit_by_index__webaudio(ma_device* pDevice, ma_device_type deviceType, int deviceIndex) -{ - ma_assert(pDevice != NULL); - EM_ASM({ - var device = miniaudio.get_device_by_index($0); +static ma_bool32 ma__is_channel_map_valid(const ma_channel* channelMap, ma_uint32 channels) +{ + /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */ + if (channelMap[0] != MA_CHANNEL_NONE) { + ma_uint32 iChannel; - /* Make sure all nodes are disconnected and marked for collection. */ - if (device.scriptNode !== undefined) { - device.scriptNode.onaudioprocess = function(e) {}; /* We want to reset the callback to ensure it doesn't get called after AudioContext.close() has returned. Shouldn't happen since we're disconnecting, but just to be safe... */ - device.scriptNode.disconnect(); - device.scriptNode = undefined; - } - if (device.streamNode !== undefined) { - device.streamNode.disconnect(); - device.streamNode = undefined; + if (channels == 0) { + return MA_FALSE; /* No channels. */ } - /* - Stop the device. I think there is a chance the callback could get fired after calling this, hence why we want - to clear the callback before closing. - */ - device.webaudio.close(); - device.webaudio = undefined; - - /* Can't forget to free the intermediary buffer. This is the buffer that's shared between JavaScript and C. */ - if (device.intermediaryBuffer !== undefined) { - Module._free(device.intermediaryBuffer); - device.intermediaryBuffer = undefined; - device.intermediaryBufferView = undefined; - device.intermediaryBufferSizeInBytes = undefined; + /* A channel cannot be present in the channel map more than once. */ + for (iChannel = 0; iChannel < channels; ++iChannel) { + ma_uint32 jChannel; + for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) { + if (channelMap[iChannel] == channelMap[jChannel]) { + return MA_FALSE; + } + } } + } - /* Make sure the device is untracked so the slot can be reused later. 
*/ - miniaudio.untrack_device_by_index($0); - }, deviceIndex, deviceType); + return MA_TRUE; } -void ma_device_uninit__webaudio(ma_device* pDevice) + +static ma_result ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType) { - ma_assert(pDevice != NULL); + ma_result result; - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); - } + MA_ASSERT(pDevice != NULL); - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback); + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) { + if (pDevice->capture.usingDefaultFormat) { + pDevice->capture.format = pDevice->capture.internalFormat; + } + if (pDevice->capture.usingDefaultChannels) { + pDevice->capture.channels = pDevice->capture.internalChannels; + } + if (pDevice->capture.usingDefaultChannelMap) { + if (pDevice->capture.internalChannels == pDevice->capture.channels) { + ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels); + } else { + ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.channels, pDevice->capture.channelMap); + } + } } - if (pDevice->type == ma_device_type_duplex) { - ma_pcm_rb_uninit(&pDevice->webaudio.duplexRB); + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + if (pDevice->playback.usingDefaultFormat) { + pDevice->playback.format = pDevice->playback.internalFormat; + } + if (pDevice->playback.usingDefaultChannels) { + pDevice->playback.channels = pDevice->playback.internalChannels; + } + if (pDevice->playback.usingDefaultChannelMap) { + if (pDevice->playback.internalChannels == pDevice->playback.channels) { + ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels); + } else { + ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.channels, pDevice->playback.channelMap); + } + } } -} -ma_result ma_device_init_by_type__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device_type deviceType, ma_device* pDevice) -{ - int deviceIndex; - ma_uint32 internalBufferSizeInFrames; + if (pDevice->usingDefaultSampleRate) { + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) { + pDevice->sampleRate = pDevice->capture.internalSampleRate; + } else { + pDevice->sampleRate = pDevice->playback.internalSampleRate; + } + } - ma_assert(pContext != NULL); - ma_assert(pConfig != NULL); - ma_assert(deviceType != ma_device_type_duplex); - ma_assert(pDevice != NULL); + /* PCM converters. */ + if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { + /* Converting from internal device format to client format. 
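+        As an illustrative sketch only (variable names here are hypothetical and not part of this function), the
+        converter initialized below is later driven along these lines, with the frame counts acting as in/out
+        parameters that report how many frames were actually consumed and produced:
+
+            ma_uint64 frameCountIn  = framesReadFromDevice;     /* frames available in pDeviceFrames (hypothetical) */
+            ma_uint64 frameCountOut = framesRequestedByClient;  /* capacity of pClientFrames (hypothetical) */
+            ma_data_converter_process_pcm_frames(&pDevice->capture.converter, pDeviceFrames, &frameCountIn, pClientFrames, &frameCountOut);
+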
*/ + ma_data_converter_config converterConfig = ma_data_converter_config_init_default(); + converterConfig.formatIn = pDevice->capture.internalFormat; + converterConfig.channelsIn = pDevice->capture.internalChannels; + converterConfig.sampleRateIn = pDevice->capture.internalSampleRate; + ma_channel_map_copy(converterConfig.channelMapIn, pDevice->capture.internalChannelMap, pDevice->capture.internalChannels); + converterConfig.formatOut = pDevice->capture.format; + converterConfig.channelsOut = pDevice->capture.channels; + converterConfig.sampleRateOut = pDevice->sampleRate; + ma_channel_map_copy(converterConfig.channelMapOut, pDevice->capture.channelMap, pDevice->capture.channels); + converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; + converterConfig.resampling.algorithm = pDevice->resampling.algorithm; + converterConfig.resampling.linear.lpfCount = pDevice->resampling.linear.lpfCount; + converterConfig.resampling.speex.quality = pDevice->resampling.speex.quality; - if (deviceType == ma_device_type_capture && !ma_is_capture_supported__webaudio()) { - return MA_NO_DEVICE; + result = ma_data_converter_init(&converterConfig, &pDevice->capture.converter); + if (result != MA_SUCCESS) { + return result; + } } - /* Try calculating an appropriate buffer size. */ - internalBufferSizeInFrames = pConfig->bufferSizeInFrames; - if (internalBufferSizeInFrames == 0) { - internalBufferSizeInFrames = ma_calculate_buffer_size_in_frames_from_milliseconds(pConfig->bufferSizeInMilliseconds, pConfig->sampleRate); - } + if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { + /* Converting from client format to device format. */ + ma_data_converter_config converterConfig = ma_data_converter_config_init_default(); + converterConfig.formatIn = pDevice->playback.format; + converterConfig.channelsIn = pDevice->playback.channels; + converterConfig.sampleRateIn = pDevice->sampleRate; + ma_channel_map_copy(converterConfig.channelMapIn, pDevice->playback.channelMap, pDevice->playback.channels); + converterConfig.formatOut = pDevice->playback.internalFormat; + converterConfig.channelsOut = pDevice->playback.internalChannels; + converterConfig.sampleRateOut = pDevice->playback.internalSampleRate; + ma_channel_map_copy(converterConfig.channelMapOut, pDevice->playback.internalChannelMap, pDevice->playback.internalChannels); + converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; + converterConfig.resampling.algorithm = pDevice->resampling.algorithm; + converterConfig.resampling.linear.lpfCount = pDevice->resampling.linear.lpfCount; + converterConfig.resampling.speex.quality = pDevice->resampling.speex.quality; - /* The size of the buffer must be a power of 2 and between 256 and 16384. */ - if (internalBufferSizeInFrames < 256) { - internalBufferSizeInFrames = 256; - } else if (internalBufferSizeInFrames > 16384) { - internalBufferSizeInFrames = 16384; - } else { - internalBufferSizeInFrames = ma_next_power_of_2(internalBufferSizeInFrames); + result = ma_data_converter_init(&converterConfig, &pDevice->playback.converter); + if (result != MA_SUCCESS) { + return result; + } } - /* We create the device on the JavaScript side and reference it using an index. We use this to make it possible to reference the device between JavaScript and C. */ - deviceIndex = EM_ASM_INT({ - var channels = $0; - var sampleRate = $1; - var bufferSize = $2; /* In PCM frames. 
*/ - var isCapture = $3; - var pDevice = $4; + return MA_SUCCESS; +} - if (typeof(miniaudio) === 'undefined') { - return -1; /* Context not initialized. */ - } - var device = {}; +static ma_thread_result MA_THREADCALL ma_worker_thread(void* pData) +{ + ma_device* pDevice = (ma_device*)pData; + MA_ASSERT(pDevice != NULL); - /* The AudioContext must be created in a suspended state. */ - device.webaudio = new (window.AudioContext || window.webkitAudioContext)({sampleRate:sampleRate}); - device.webaudio.suspend(); +#ifdef MA_WIN32 + ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE); +#endif - /* - We need an intermediary buffer which we use for JavaScript and C interop. This buffer stores interleaved f32 PCM data. Because it's passed between - JavaScript and C it needs to be allocated and freed using Module._malloc() and Module._free(). - */ - device.intermediaryBufferSizeInBytes = channels * bufferSize * 4; - device.intermediaryBuffer = Module._malloc(device.intermediaryBufferSizeInBytes); - device.intermediaryBufferView = new Float32Array(Module.HEAPF32.buffer, device.intermediaryBuffer, device.intermediaryBufferSizeInBytes); + /* + When the device is being initialized it's initial state is set to MA_STATE_UNINITIALIZED. Before returning from + ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately + after initialization is stopped, so therefore we need to mark the device as such. miniaudio will wait on the worker + thread to signal an event to know when the worker thread is ready for action. + */ + ma_device__set_state(pDevice, MA_STATE_STOPPED); + ma_event_signal(&pDevice->stopEvent); - /* - Both playback and capture devices use a ScriptProcessorNode for performing per-sample operations. + for (;;) { /* <-- This loop just keeps the thread alive. The main audio loop is inside. */ + ma_stop_proc onStop; - ScriptProcessorNode is actually deprecated so this is likely to be temporary. The way this works for playback is very simple. You just set a callback - that's periodically fired, just like a normal audio callback function. But apparently this design is "flawed" and is now deprecated in favour of - something called AudioWorklets which _forces_ you to load a _separate_ .js file at run time... nice... Hopefully ScriptProcessorNode will continue to - work for years to come, but this may need to change to use AudioSourceBufferNode instead, which I think is what Emscripten uses for it's built-in SDL - implementation. I'll be avoiding that insane AudioWorklet API like the plague... + /* We wait on an event to know when something has requested that the device be started and the main loop entered. */ + ma_event_wait(&pDevice->wakeupEvent); - For capture it is a bit unintuitive. We use the ScriptProccessorNode _only_ to get the raw PCM data. It is connected to an AudioContext just like the - playback case, however we just output silence to the AudioContext instead of passing any real data. It would make more sense to me to use the - MediaRecorder API, but unfortunately you need to specify a MIME time (Opus, Vorbis, etc.) for the binary blob that's returned to the client, but I've - been unable to figure out how to get this as raw PCM. The closes I can think is to use the MIME type for WAV files and just parse it, but I don't know - how well this would work. Although ScriptProccessorNode is deprecated, in practice it seems to have pretty good browser support so I'm leaving it like - this for now. 
If anything knows how I could get raw PCM data using the MediaRecorder API please let me know! - */ - device.scriptNode = device.webaudio.createScriptProcessor(bufferSize, channels, channels); + /* Default result code. */ + pDevice->workResult = MA_SUCCESS; - if (isCapture) { - device.scriptNode.onaudioprocess = function(e) { - if (device.intermediaryBuffer === undefined) { - return; /* This means the device has been uninitialized. */ - } + /* If the reason for the wake up is that we are terminating, just break from the loop. */ + if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { + break; + } - /* Make sure silence it output to the AudioContext destination. Not doing this will cause sound to come out of the speakers! */ - for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { - e.outputBuffer.getChannelData(iChannel).fill(0.0); - } + /* + Getting to this point means the device is wanting to get started. The function that has requested that the device + be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event + in both the success and error case. It's important that the state of the device is set _before_ signaling the event. + */ + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STARTING); - /* There are some situations where we may want to send silence to the client. */ - var sendSilence = false; - if (device.streamNode === undefined) { - sendSilence = true; - } + /* Make sure the state is set appropriately. */ + ma_device__set_state(pDevice, MA_STATE_STARTED); + ma_event_signal(&pDevice->startEvent); - /* Sanity check. This will never happen, right? */ - if (e.inputBuffer.numberOfChannels != channels) { - console.log("Capture: Channel count mismatch. " + e.inputBufer.numberOfChannels + " != " + channels + ". Sending silence."); - sendSilence = true; - } + if (pDevice->pContext->onDeviceMainLoop != NULL) { + pDevice->pContext->onDeviceMainLoop(pDevice); + } else { + ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "No main loop implementation.", MA_API_NOT_FOUND); + } - /* This looped design guards against the situation where e.inputBuffer is a different size to the original buffer size. Should never happen in practice. */ - var totalFramesProcessed = 0; - while (totalFramesProcessed < e.inputBuffer.length) { - var framesRemaining = e.inputBuffer.length - totalFramesProcessed; - var framesToProcess = framesRemaining; - if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) { - framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4); - } + /* + Getting here means we have broken from the main loop which happens the application has requested that device be stopped. Note that this + may have actually already happened above if the device was lost and miniaudio has attempted to re-initialize the device. In this case we + don't want to be doing this a second time. + */ + if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) { + if (pDevice->pContext->onDeviceStop) { + pDevice->pContext->onDeviceStop(pDevice); + } + } - /* We need to do the reverse of the playback case. We need to interleave the input data and copy it into the intermediary buffer. Then we send it to the client. 
*/ - if (sendSilence) { - device.intermediaryBufferView.fill(0.0); - } else { - for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) { - for (var iChannel = 0; iChannel < e.inputBuffer.numberOfChannels; ++iChannel) { - device.intermediaryBufferView[iFrame*channels + iChannel] = e.inputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame]; - } - } - } + /* After the device has stopped, make sure an event is posted. */ + onStop = pDevice->onStop; + if (onStop) { + onStop(pDevice); + } + + /* + A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. Note that + it's possible that the device has been uninitialized which means we need to _not_ change the status to stopped. We cannot go from an + uninitialized state to stopped state. + */ + if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) { + ma_device__set_state(pDevice, MA_STATE_STOPPED); + ma_event_signal(&pDevice->stopEvent); + } + } - /* Send data to the client from our intermediary buffer. */ - ccall("ma_device_process_pcm_frames_capture__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]); + /* Make sure we aren't continuously waiting on a stop event. */ + ma_event_signal(&pDevice->stopEvent); /* <-- Is this still needed? */ - totalFramesProcessed += framesToProcess; - } - }; +#ifdef MA_WIN32 + ma_CoUninitialize(pDevice->pContext); +#endif - navigator.mediaDevices.getUserMedia({audio:true, video:false}) - .then(function(stream) { - device.streamNode = device.webaudio.createMediaStreamSource(stream); - device.streamNode.connect(device.scriptNode); - device.scriptNode.connect(device.webaudio.destination); - }) - .catch(function(error) { - /* I think this should output silence... */ - device.scriptNode.connect(device.webaudio.destination); - }); - } else { - device.scriptNode.onaudioprocess = function(e) { - if (device.intermediaryBuffer === undefined) { - return; /* This means the device has been uninitialized. */ - } + return (ma_thread_result)0; +} - var outputSilence = false; - /* Sanity check. This will never happen, right? */ - if (e.outputBuffer.numberOfChannels != channels) { - console.log("Playback: Channel count mismatch. " + e.outputBufer.numberOfChannels + " != " + channels + ". Outputting silence."); - outputSilence = true; - return; - } +/* Helper for determining whether or not the given device is initialized. */ +static ma_bool32 ma_device__is_initialized(ma_device* pDevice) +{ + if (pDevice == NULL) { + return MA_FALSE; + } - /* This looped design guards against the situation where e.outputBuffer is a different size to the original buffer size. Should never happen in practice. */ - var totalFramesProcessed = 0; - while (totalFramesProcessed < e.outputBuffer.length) { - var framesRemaining = e.outputBuffer.length - totalFramesProcessed; - var framesToProcess = framesRemaining; - if (framesToProcess > (device.intermediaryBufferSizeInBytes/channels/4)) { - framesToProcess = (device.intermediaryBufferSizeInBytes/channels/4); - } + return ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED; +} - /* Read data from the client into our intermediary buffer. */ - ccall("ma_device_process_pcm_frames_playback__webaudio", "undefined", ["number", "number", "number"], [pDevice, framesToProcess, device.intermediaryBuffer]); - /* At this point we'll have data in our intermediary buffer which we now need to deinterleave and copy over to the output buffers. 
*/ - if (outputSilence) { - for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { - e.outputBuffer.getChannelData(iChannel).fill(0.0); - } - } else { - for (var iChannel = 0; iChannel < e.outputBuffer.numberOfChannels; ++iChannel) { - for (var iFrame = 0; iFrame < framesToProcess; ++iFrame) { - e.outputBuffer.getChannelData(iChannel)[totalFramesProcessed + iFrame] = device.intermediaryBufferView[iFrame*channels + iChannel]; - } - } - } +#ifdef MA_WIN32 +static ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext) +{ + ma_CoUninitialize(pContext); + ma_dlclose(pContext, pContext->win32.hUser32DLL); + ma_dlclose(pContext, pContext->win32.hOle32DLL); + ma_dlclose(pContext, pContext->win32.hAdvapi32DLL); - totalFramesProcessed += framesToProcess; - } - }; + return MA_SUCCESS; +} - device.scriptNode.connect(device.webaudio.destination); - } +static ma_result ma_context_init_backend_apis__win32(ma_context* pContext) +{ +#ifdef MA_WIN32_DESKTOP + /* Ole32.dll */ + pContext->win32.hOle32DLL = ma_dlopen(pContext, "ole32.dll"); + if (pContext->win32.hOle32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; + } - return miniaudio.track_device(device); - }, (deviceType == ma_device_type_capture) ? pConfig->capture.channels : pConfig->playback.channels, pConfig->sampleRate, internalBufferSizeInFrames, deviceType == ma_device_type_capture, pDevice); + pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoInitializeEx"); + pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoUninitialize"); + pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoCreateInstance"); + pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoTaskMemFree"); + pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "PropVariantClear"); + pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "StringFromGUID2"); - if (deviceIndex < 0) { - return MA_FAILED_TO_OPEN_BACKEND_DEVICE; + + /* User32.dll */ + pContext->win32.hUser32DLL = ma_dlopen(pContext, "user32.dll"); + if (pContext->win32.hUser32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; } - if (deviceType == ma_device_type_capture) { - pDevice->webaudio.indexCapture = deviceIndex; - pDevice->capture.internalFormat = ma_format_f32; - pDevice->capture.internalChannels = pConfig->capture.channels; - ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->capture.internalChannels, pDevice->capture.internalChannelMap); - pDevice->capture.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); - pDevice->capture.internalBufferSizeInFrames = internalBufferSizeInFrames; - pDevice->capture.internalPeriods = 1; - } else { - pDevice->webaudio.indexPlayback = deviceIndex; - pDevice->playback.internalFormat = ma_format_f32; - pDevice->playback.internalChannels = pConfig->playback.channels; - ma_get_standard_channel_map(ma_standard_channel_map_webaudio, pDevice->playback.internalChannels, pDevice->playback.internalChannelMap); - pDevice->playback.internalSampleRate = EM_ASM_INT({ return miniaudio.get_device_by_index($0).webaudio.sampleRate; }, deviceIndex); - pDevice->playback.internalBufferSizeInFrames = internalBufferSizeInFrames; - pDevice->playback.internalPeriods = 1; + pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(pContext, 
pContext->win32.hUser32DLL, "GetForegroundWindow"); + pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetDesktopWindow"); + + + /* Advapi32.dll */ + pContext->win32.hAdvapi32DLL = ma_dlopen(pContext, "advapi32.dll"); + if (pContext->win32.hAdvapi32DLL == NULL) { + return MA_FAILED_TO_INIT_BACKEND; } + pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegOpenKeyExA"); + pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegCloseKey"); + pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegQueryValueExA"); +#endif + + ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE); return MA_SUCCESS; } - -ma_result ma_device_init__webaudio(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +#else +static ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext) { - ma_result result; +#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING) + ma_dlclose(pContext, pContext->posix.pthreadSO); +#else + (void)pContext; +#endif - if (pConfig->deviceType == ma_device_type_loopback) { - return MA_DEVICE_TYPE_NOT_SUPPORTED; - } + return MA_SUCCESS; +} - /* No exclusive mode with Web Audio. */ - if (((pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) && pConfig->playback.shareMode == ma_share_mode_exclusive) || - ((pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) && pConfig->capture.shareMode == ma_share_mode_exclusive)) { - return MA_SHARE_MODE_NOT_SUPPORTED; - } +static ma_result ma_context_init_backend_apis__nix(ma_context* pContext) +{ + /* pthread */ +#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING) + const char* libpthreadFileNames[] = { + "libpthread.so", + "libpthread.so.0", + "libpthread.dylib" + }; + size_t i; - if (pConfig->deviceType == ma_device_type_capture || pConfig->deviceType == ma_device_type_duplex) { - result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_capture, pDevice); - if (result != MA_SUCCESS) { - return result; + for (i = 0; i < sizeof(libpthreadFileNames) / sizeof(libpthreadFileNames[0]); ++i) { + pContext->posix.pthreadSO = ma_dlopen(pContext, libpthreadFileNames[i]); + if (pContext->posix.pthreadSO != NULL) { + break; } } - if (pConfig->deviceType == ma_device_type_playback || pConfig->deviceType == ma_device_type_duplex) { - result = ma_device_init_by_type__webaudio(pContext, pConfig, ma_device_type_playback, pDevice); - if (result != MA_SUCCESS) { - if (pConfig->deviceType == ma_device_type_duplex) { - ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); - } - return result; - } + if (pContext->posix.pthreadSO == NULL) { + return MA_FAILED_TO_INIT_BACKEND; } - /* - We need a ring buffer for moving data from the capture device to the playback device. The capture callback is the producer - and the playback callback is the consumer. The buffer needs to be large enough to hold internalBufferSizeInFrames based on - the external sample rate. 
- */ - if (pConfig->deviceType == ma_device_type_duplex) { - ma_uint32 rbSizeInFrames = (ma_uint32)ma_calculate_frame_count_after_src(pDevice->sampleRate, pDevice->capture.internalSampleRate, pDevice->capture.internalBufferSizeInFrames) * 2; - result = ma_pcm_rb_init(pDevice->capture.format, pDevice->capture.channels, rbSizeInFrames, NULL, &pDevice->webaudio.duplexRB); - if (result != MA_SUCCESS) { - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_capture, pDevice->webaudio.indexCapture); - } - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - ma_device_uninit_by_index__webaudio(pDevice, ma_device_type_playback, pDevice->webaudio.indexPlayback); - } - return result; - } + pContext->posix.pthread_create = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_create"); + pContext->posix.pthread_join = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_join"); + pContext->posix.pthread_mutex_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_init"); + pContext->posix.pthread_mutex_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_destroy"); + pContext->posix.pthread_mutex_lock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_lock"); + pContext->posix.pthread_mutex_unlock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_unlock"); + pContext->posix.pthread_cond_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_init"); + pContext->posix.pthread_cond_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_destroy"); + pContext->posix.pthread_cond_wait = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_wait"); + pContext->posix.pthread_cond_signal = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_signal"); + pContext->posix.pthread_attr_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_init"); + pContext->posix.pthread_attr_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_destroy"); + pContext->posix.pthread_attr_setschedpolicy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedpolicy"); + pContext->posix.pthread_attr_getschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_getschedparam"); + pContext->posix.pthread_attr_setschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedparam"); +#else + pContext->posix.pthread_create = (ma_proc)pthread_create; + pContext->posix.pthread_join = (ma_proc)pthread_join; + pContext->posix.pthread_mutex_init = (ma_proc)pthread_mutex_init; + pContext->posix.pthread_mutex_destroy = (ma_proc)pthread_mutex_destroy; + pContext->posix.pthread_mutex_lock = (ma_proc)pthread_mutex_lock; + pContext->posix.pthread_mutex_unlock = (ma_proc)pthread_mutex_unlock; + pContext->posix.pthread_cond_init = (ma_proc)pthread_cond_init; + pContext->posix.pthread_cond_destroy = (ma_proc)pthread_cond_destroy; + pContext->posix.pthread_cond_wait = (ma_proc)pthread_cond_wait; + pContext->posix.pthread_cond_signal = (ma_proc)pthread_cond_signal; + pContext->posix.pthread_attr_init = (ma_proc)pthread_attr_init; + pContext->posix.pthread_attr_destroy = (ma_proc)pthread_attr_destroy; +#if !defined(__EMSCRIPTEN__) + pContext->posix.pthread_attr_setschedpolicy = 
(ma_proc)pthread_attr_setschedpolicy; + pContext->posix.pthread_attr_getschedparam = (ma_proc)pthread_attr_getschedparam; + pContext->posix.pthread_attr_setschedparam = (ma_proc)pthread_attr_setschedparam; +#endif +#endif - /* We need a period to act as a buffer for cases where the playback and capture device's end up desyncing. */ - { - ma_uint32 marginSizeInFrames = rbSizeInFrames / 3; /* <-- Dividing by 3 because internalPeriods is always set to 1 for WebAudio. */ - void* pMarginData; - ma_pcm_rb_acquire_write(&pDevice->webaudio.duplexRB, &marginSizeInFrames, &pMarginData); - { - ma_zero_memory(pMarginData, marginSizeInFrames * ma_get_bytes_per_frame(pDevice->capture.format, pDevice->capture.channels)); - } - ma_pcm_rb_commit_write(&pDevice->webaudio.duplexRB, marginSizeInFrames, pMarginData); - } - } + return MA_SUCCESS; +} +#endif + +static ma_result ma_context_init_backend_apis(ma_context* pContext) +{ + ma_result result; +#ifdef MA_WIN32 + result = ma_context_init_backend_apis__win32(pContext); +#else + result = ma_context_init_backend_apis__nix(pContext); +#endif + + return result; +} + +static ma_result ma_context_uninit_backend_apis(ma_context* pContext) +{ + ma_result result; +#ifdef MA_WIN32 + result = ma_context_uninit_backend_apis__win32(pContext); +#else + result = ma_context_uninit_backend_apis__nix(pContext); +#endif - return MA_SUCCESS; + return result; } -ma_result ma_device_start__webaudio(ma_device* pDevice) + +static ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext) { - ma_assert(pDevice != NULL); + return pContext->isBackendAsynchronous; +} - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - EM_ASM({ - miniaudio.get_device_by_index($0).webaudio.resume(); - }, pDevice->webaudio.indexCapture); +ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext) +{ + ma_result result; + ma_context_config config; + ma_backend defaultBackends[ma_backend_null+1]; + ma_uint32 iBackend; + ma_backend* pBackendsToIterate; + ma_uint32 backendsToIterateCount; + + if (pContext == NULL) { + return MA_INVALID_ARGS; } - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - EM_ASM({ - miniaudio.get_device_by_index($0).webaudio.resume(); - }, pDevice->webaudio.indexPlayback); + MA_ZERO_OBJECT(pContext); + + /* Always make sure the config is set first to ensure properties are available as soon as possible. */ + if (pConfig != NULL) { + config = *pConfig; + } else { + config = ma_context_config_init(); } - return MA_SUCCESS; -} + pContext->logCallback = config.logCallback; + pContext->threadPriority = config.threadPriority; + pContext->pUserData = config.pUserData; -ma_result ma_device_stop__webaudio(ma_device* pDevice) -{ - ma_assert(pDevice != NULL); + result = ma_allocation_callbacks_init_copy(&pContext->allocationCallbacks, &config.allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - EM_ASM({ - miniaudio.get_device_by_index($0).webaudio.suspend(); - }, pDevice->webaudio.indexCapture); + /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. 
*/ + result = ma_context_init_backend_apis(pContext); + if (result != MA_SUCCESS) { + return result; } - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - EM_ASM({ - miniaudio.get_device_by_index($0).webaudio.suspend(); - }, pDevice->webaudio.indexPlayback); + for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { + defaultBackends[iBackend] = (ma_backend)iBackend; } - ma_stop_proc onStop = pDevice->onStop; - if (onStop) { - onStop(pDevice); + pBackendsToIterate = (ma_backend*)backends; + backendsToIterateCount = backendCount; + if (pBackendsToIterate == NULL) { + pBackendsToIterate = (ma_backend*)defaultBackends; + backendsToIterateCount = ma_countof(defaultBackends); } - return MA_SUCCESS; + MA_ASSERT(pBackendsToIterate != NULL); + + for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { + ma_backend backend = pBackendsToIterate[iBackend]; + + result = MA_NO_BACKEND; + switch (backend) { + #ifdef MA_HAS_WASAPI + case ma_backend_wasapi: + { + result = ma_context_init__wasapi(&config, pContext); + } break; + #endif + #ifdef MA_HAS_DSOUND + case ma_backend_dsound: + { + result = ma_context_init__dsound(&config, pContext); + } break; + #endif + #ifdef MA_HAS_WINMM + case ma_backend_winmm: + { + result = ma_context_init__winmm(&config, pContext); + } break; + #endif + #ifdef MA_HAS_ALSA + case ma_backend_alsa: + { + result = ma_context_init__alsa(&config, pContext); + } break; + #endif + #ifdef MA_HAS_PULSEAUDIO + case ma_backend_pulseaudio: + { + result = ma_context_init__pulse(&config, pContext); + } break; + #endif + #ifdef MA_HAS_JACK + case ma_backend_jack: + { + result = ma_context_init__jack(&config, pContext); + } break; + #endif + #ifdef MA_HAS_COREAUDIO + case ma_backend_coreaudio: + { + result = ma_context_init__coreaudio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_SNDIO + case ma_backend_sndio: + { + result = ma_context_init__sndio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_AUDIO4 + case ma_backend_audio4: + { + result = ma_context_init__audio4(&config, pContext); + } break; + #endif + #ifdef MA_HAS_OSS + case ma_backend_oss: + { + result = ma_context_init__oss(&config, pContext); + } break; + #endif + #ifdef MA_HAS_AAUDIO + case ma_backend_aaudio: + { + result = ma_context_init__aaudio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_OPENSL + case ma_backend_opensl: + { + result = ma_context_init__opensl(&config, pContext); + } break; + #endif + #ifdef MA_HAS_WEBAUDIO + case ma_backend_webaudio: + { + result = ma_context_init__webaudio(&config, pContext); + } break; + #endif + #ifdef MA_HAS_NULL + case ma_backend_null: + { + result = ma_context_init__null(&config, pContext); + } break; + #endif + + default: break; + } + + /* If this iteration was successful, return. */ + if (result == MA_SUCCESS) { + result = ma_mutex_init(pContext, &pContext->deviceEnumLock); + if (result != MA_SUCCESS) { + ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.", MA_FAILED_TO_CREATE_MUTEX); + } + result = ma_mutex_init(pContext, &pContext->deviceInfoLock); + if (result != MA_SUCCESS) { + ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.", MA_FAILED_TO_CREATE_MUTEX); + } + +#ifdef MA_DEBUG_OUTPUT + printf("[miniaudio] Endian: %s\n", ma_is_little_endian() ? 
"LE" : "BE"); + printf("[miniaudio] SSE2: %s\n", ma_has_sse2() ? "YES" : "NO"); + printf("[miniaudio] AVX2: %s\n", ma_has_avx2() ? "YES" : "NO"); + printf("[miniaudio] AVX512F: %s\n", ma_has_avx512f() ? "YES" : "NO"); + printf("[miniaudio] NEON: %s\n", ma_has_neon() ? "YES" : "NO"); +#endif + + pContext->backend = backend; + return result; + } + } + + /* If we get here it means an error occurred. */ + MA_ZERO_OBJECT(pContext); /* Safety. */ + return MA_NO_BACKEND; } -ma_result ma_context_uninit__webaudio(ma_context* pContext) +ma_result ma_context_uninit(ma_context* pContext) { - ma_assert(pContext != NULL); - ma_assert(pContext->backend == ma_backend_webaudio); + if (pContext == NULL) { + return MA_INVALID_ARGS; + } - /* Nothing needs to be done here. */ - (void)pContext; + pContext->onUninit(pContext); + + ma_mutex_uninit(&pContext->deviceEnumLock); + ma_mutex_uninit(&pContext->deviceInfoLock); + ma__free_from_callbacks(pContext->pDeviceInfos, &pContext->allocationCallbacks); + ma_context_uninit_backend_apis(pContext); return MA_SUCCESS; } -ma_result ma_context_init__webaudio(const ma_context_config* pConfig, ma_context* pContext) -{ - int resultFromJS; - ma_assert(pContext != NULL); - - /* Here is where our global JavaScript object is initialized. */ - resultFromJS = EM_ASM_INT({ - if ((window.AudioContext || window.webkitAudioContext) === undefined) { - return 0; /* Web Audio not supported. */ - } +ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +{ + ma_result result; - if (typeof(miniaudio) === 'undefined') { - miniaudio = {}; - miniaudio.devices = []; /* Device cache for mapping devices to indexes for JavaScript/C interop. */ - - miniaudio.track_device = function(device) { - /* Try inserting into a free slot first. */ - for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { - if (miniaudio.devices[iDevice] == null) { - miniaudio.devices[iDevice] = device; - return iDevice; - } - } - - /* Getting here means there is no empty slots in the array so we just push to the end. */ - miniaudio.devices.push(device); - return miniaudio.devices.length - 1; - }; - - miniaudio.untrack_device_by_index = function(deviceIndex) { - /* We just set the device's slot to null. The slot will get reused in the next call to ma_track_device. */ - miniaudio.devices[deviceIndex] = null; - - /* Trim the array if possible. */ - while (miniaudio.devices.length > 0) { - if (miniaudio.devices[miniaudio.devices.length-1] == null) { - miniaudio.devices.pop(); - } else { - break; - } - } - }; - - miniaudio.untrack_device = function(device) { - for (var iDevice = 0; iDevice < miniaudio.devices.length; ++iDevice) { - if (miniaudio.devices[iDevice] == device) { - return miniaudio.untrack_device_by_index(iDevice); - } - } - }; - - miniaudio.get_device_by_index = function(deviceIndex) { - return miniaudio.devices[deviceIndex]; - }; - } - - return 1; - }, 0); /* Must pass in a dummy argument for C99 compatibility. 
*/ + if (pContext == NULL || pContext->onEnumDevices == NULL || callback == NULL) { + return MA_INVALID_ARGS; + } - if (resultFromJS != 1) { - return MA_FAILED_TO_INIT_BACKEND; + ma_mutex_lock(&pContext->deviceEnumLock); + { + result = pContext->onEnumDevices(pContext, callback, pUserData); } + ma_mutex_unlock(&pContext->deviceEnumLock); + return result; +} - pContext->isBackendAsynchronous = MA_TRUE; - pContext->onUninit = ma_context_uninit__webaudio; - pContext->onDeviceIDEqual = ma_context_is_device_id_equal__webaudio; - pContext->onEnumDevices = ma_context_enumerate_devices__webaudio; - pContext->onGetDeviceInfo = ma_context_get_device_info__webaudio; - pContext->onDeviceInit = ma_device_init__webaudio; - pContext->onDeviceUninit = ma_device_uninit__webaudio; - pContext->onDeviceStart = ma_device_start__webaudio; - pContext->onDeviceStop = ma_device_stop__webaudio; +static ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData) +{ + /* + We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device + it's just appended to the end. If it's a playback device it's inserted just before the first capture device. + */ - (void)pConfig; /* Unused. */ - return MA_SUCCESS; -} -#endif /* Web Audio */ + /* + First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a + simple fixed size increment for buffer expansion. + */ + const ma_uint32 bufferExpansionCount = 2; + const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount; + if (pContext->deviceInfoCapacity >= totalDeviceInfoCount) { + ma_uint32 oldCapacity = pContext->deviceInfoCapacity; + ma_uint32 newCapacity = oldCapacity + bufferExpansionCount; + ma_device_info* pNewInfos = (ma_device_info*)ma__realloc_from_callbacks(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity, sizeof(*pContext->pDeviceInfos)*oldCapacity, &pContext->allocationCallbacks); + if (pNewInfos == NULL) { + return MA_FALSE; /* Out of memory. */ + } + pContext->pDeviceInfos = pNewInfos; + pContext->deviceInfoCapacity = newCapacity; + } -ma_bool32 ma__is_channel_map_valid(const ma_channel* channelMap, ma_uint32 channels) -{ - /* A blank channel map should be allowed, in which case it should use an appropriate default which will depend on context. */ - if (channelMap[0] != MA_CHANNEL_NONE) { - ma_uint32 iChannel; + if (deviceType == ma_device_type_playback) { + /* Playback. Insert just before the first capture device. */ - if (channels == 0) { - return MA_FALSE; /* No channels. */ + /* The first thing to do is move all of the capture devices down a slot. */ + ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount; + size_t iCaptureDevice; + for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) { + pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1]; } - /* A channel cannot be present in the channel map more than once. */ - for (iChannel = 0; iChannel < channels; ++iChannel) { - ma_uint32 jChannel; - for (jChannel = iChannel + 1; jChannel < channels; ++jChannel) { - if (channelMap[iChannel] == channelMap[jChannel]) { - return MA_FALSE; - } - } - } + /* Now just insert where the first capture device was before moving it down a slot. 
*/ + pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo; + pContext->playbackDeviceInfoCount += 1; + } else { + /* Capture. Insert at the end. */ + pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo; + pContext->captureDeviceInfoCount += 1; } + (void)pUserData; return MA_TRUE; } - -void ma_device__post_init_setup(ma_device* pDevice, ma_device_type deviceType) +ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount) { - ma_assert(pDevice != NULL); + ma_result result; - if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) { - if (pDevice->capture.usingDefaultFormat) { - pDevice->capture.format = pDevice->capture.internalFormat; - } - if (pDevice->capture.usingDefaultChannels) { - pDevice->capture.channels = pDevice->capture.internalChannels; - } - if (pDevice->capture.usingDefaultChannelMap) { - if (pDevice->capture.internalChannels == pDevice->capture.channels) { - ma_channel_map_copy(pDevice->capture.channelMap, pDevice->capture.internalChannelMap, pDevice->capture.channels); - } else { - ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->capture.channels, pDevice->capture.channelMap); - } - } + /* Safety. */ + if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL; + if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0; + if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL; + if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0; + + if (pContext == NULL || pContext->onEnumDevices == NULL) { + return MA_INVALID_ARGS; } - if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { - if (pDevice->playback.usingDefaultFormat) { - pDevice->playback.format = pDevice->playback.internalFormat; - } - if (pDevice->playback.usingDefaultChannels) { - pDevice->playback.channels = pDevice->playback.internalChannels; - } - if (pDevice->playback.usingDefaultChannelMap) { - if (pDevice->playback.internalChannels == pDevice->playback.channels) { - ma_channel_map_copy(pDevice->playback.channelMap, pDevice->playback.internalChannelMap, pDevice->playback.channels); - } else { - ma_get_standard_channel_map(ma_standard_channel_map_default, pDevice->playback.channels, pDevice->playback.channelMap); + /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */ + ma_mutex_lock(&pContext->deviceEnumLock); + { + /* Reset everything first. */ + pContext->playbackDeviceInfoCount = 0; + pContext->captureDeviceInfoCount = 0; + + /* Now enumerate over available devices. */ + result = pContext->onEnumDevices(pContext, ma_context_get_devices__enum_callback, NULL); + if (result == MA_SUCCESS) { + /* Playback devices. */ + if (ppPlaybackDeviceInfos != NULL) { + *ppPlaybackDeviceInfos = pContext->pDeviceInfos; + } + if (pPlaybackDeviceCount != NULL) { + *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount; + } + + /* Capture devices. */ + if (ppCaptureDeviceInfos != NULL) { + *ppCaptureDeviceInfos = pContext->pDeviceInfos + pContext->playbackDeviceInfoCount; /* Capture devices come after playback devices. 
*/ + } + if (pCaptureDeviceCount != NULL) { + *pCaptureDeviceCount = pContext->captureDeviceInfoCount; } } } + ma_mutex_unlock(&pContext->deviceEnumLock); - if (pDevice->usingDefaultSampleRate) { - if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex) { - pDevice->sampleRate = pDevice->capture.internalSampleRate; - } else { - pDevice->sampleRate = pDevice->playback.internalSampleRate; - } + return result; +} + +ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +{ + ma_device_info deviceInfo; + + /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */ + if (pContext == NULL || pDeviceInfo == NULL) { + return MA_INVALID_ARGS; } - /* PCM converters. */ - if (deviceType == ma_device_type_capture || deviceType == ma_device_type_duplex || deviceType == ma_device_type_loopback) { - /* Converting from internal device format to public format. */ - ma_pcm_converter_config converterConfig = ma_pcm_converter_config_init_new(); - converterConfig.neverConsumeEndOfInput = MA_TRUE; - converterConfig.pUserData = pDevice; - converterConfig.formatIn = pDevice->capture.internalFormat; - converterConfig.channelsIn = pDevice->capture.internalChannels; - converterConfig.sampleRateIn = pDevice->capture.internalSampleRate; - ma_channel_map_copy(converterConfig.channelMapIn, pDevice->capture.internalChannelMap, pDevice->capture.internalChannels); - converterConfig.formatOut = pDevice->capture.format; - converterConfig.channelsOut = pDevice->capture.channels; - converterConfig.sampleRateOut = pDevice->sampleRate; - ma_channel_map_copy(converterConfig.channelMapOut, pDevice->capture.channelMap, pDevice->capture.channels); - converterConfig.onRead = ma_device__pcm_converter__on_read_from_buffer_capture; - ma_pcm_converter_init(&converterConfig, &pDevice->capture.converter); + MA_ZERO_OBJECT(&deviceInfo); + + /* Help the backend out by copying over the device ID if we have one. */ + if (pDeviceID != NULL) { + MA_COPY_MEMORY(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID)); } - if (deviceType == ma_device_type_playback || deviceType == ma_device_type_duplex) { - /* Converting from public format to device format. */ - ma_pcm_converter_config converterConfig = ma_pcm_converter_config_init_new(); - converterConfig.neverConsumeEndOfInput = MA_TRUE; - converterConfig.pUserData = pDevice; - converterConfig.formatIn = pDevice->playback.format; - converterConfig.channelsIn = pDevice->playback.channels; - converterConfig.sampleRateIn = pDevice->sampleRate; - ma_channel_map_copy(converterConfig.channelMapIn, pDevice->playback.channelMap, pDevice->playback.channels); - converterConfig.formatOut = pDevice->playback.internalFormat; - converterConfig.channelsOut = pDevice->playback.internalChannels; - converterConfig.sampleRateOut = pDevice->playback.internalSampleRate; - ma_channel_map_copy(converterConfig.channelMapOut, pDevice->playback.internalChannelMap, pDevice->playback.internalChannels); - if (deviceType == ma_device_type_playback) { - if (pDevice->type == ma_device_type_playback) { - converterConfig.onRead = ma_device__on_read_from_client; - } else { - converterConfig.onRead = ma_device__pcm_converter__on_read_from_buffer_playback; - } - } else { - converterConfig.onRead = ma_device__pcm_converter__on_read_from_buffer_playback; + /* The backend may have an optimized device info retrieval function. 
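A minimal usage sketch of the enumeration path added above (ma_context_init() followed by ma_context_get_devices()), written against the public miniaudio 0.10 API. The function name list_devices is a placeholder, the `name` field of ma_device_info is assumed as in upstream miniaudio, and it assumes MINIAUDIO_IMPLEMENTATION is defined in exactly one translation unit:

    /* Illustrative only: initialize a context with the default backend order, then query the shared device list. */
    #include <stdio.h>
    #include "miniaudio.h"

    int list_devices(void)
    {
        ma_context context;
        ma_device_info* pPlaybackInfos;
        ma_device_info* pCaptureInfos;
        ma_uint32 playbackCount;
        ma_uint32 captureCount;
        ma_uint32 i;

        if (ma_context_init(NULL, 0, NULL, &context) != MA_SUCCESS) {
            return -1;  /* No usable backend. */
        }

        /* Playback infos come first in the shared buffer; capture infos follow them. */
        if (ma_context_get_devices(&context, &pPlaybackInfos, &playbackCount, &pCaptureInfos, &captureCount) == MA_SUCCESS) {
            for (i = 0; i < playbackCount; ++i) {
                printf("Playback %u: %s\n", (unsigned)i, pPlaybackInfos[i].name);  /* `name` field assumed from upstream miniaudio. */
            }
            for (i = 0; i < captureCount; ++i) {
                printf("Capture %u: %s\n", (unsigned)i, pCaptureInfos[i].name);
            }
        }

        ma_context_uninit(&context);
        return 0;
    }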
If so, try that first. */ + if (pContext->onGetDeviceInfo != NULL) { + ma_result result; + ma_mutex_lock(&pContext->deviceInfoLock); + { + result = pContext->onGetDeviceInfo(pContext, deviceType, pDeviceID, shareMode, &deviceInfo); } - ma_pcm_converter_init(&converterConfig, &pDevice->playback.converter); - } -} + ma_mutex_unlock(&pContext->deviceInfoLock); + /* Clamp ranges. */ + deviceInfo.minChannels = ma_max(deviceInfo.minChannels, MA_MIN_CHANNELS); + deviceInfo.maxChannels = ma_min(deviceInfo.maxChannels, MA_MAX_CHANNELS); + deviceInfo.minSampleRate = ma_max(deviceInfo.minSampleRate, MA_MIN_SAMPLE_RATE); + deviceInfo.maxSampleRate = ma_min(deviceInfo.maxSampleRate, MA_MAX_SAMPLE_RATE); -ma_thread_result MA_THREADCALL ma_worker_thread(void* pData) -{ - ma_device* pDevice = (ma_device*)pData; - ma_assert(pDevice != NULL); + *pDeviceInfo = deviceInfo; + return result; + } -#ifdef MA_WIN32 - ma_CoInitializeEx(pDevice->pContext, NULL, MA_COINIT_VALUE); -#endif + /* Getting here means onGetDeviceInfo has not been set. */ + return MA_ERROR; +} - /* - When the device is being initialized it's initial state is set to MA_STATE_UNINITIALIZED. Before returning from - ma_device_init(), the state needs to be set to something valid. In miniaudio the device's default state immediately - after initialization is stopped, so therefore we need to mark the device as such. miniaudio will wait on the worker - thread to signal an event to know when the worker thread is ready for action. - */ - ma_device__set_state(pDevice, MA_STATE_STOPPED); - ma_event_signal(&pDevice->stopEvent); +ma_bool32 ma_context_is_loopback_supported(ma_context* pContext) +{ + if (pContext == NULL) { + return MA_FALSE; + } - for (;;) { /* <-- This loop just keeps the thread alive. The main audio loop is inside. */ - ma_stop_proc onStop; + return ma_is_loopback_supported(pContext->backend); +} - /* We wait on an event to know when something has requested that the device be started and the main loop entered. */ - ma_event_wait(&pDevice->wakeupEvent); - /* Default result code. */ - pDevice->workResult = MA_SUCCESS; +ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +{ + ma_result result; + ma_device_config config; - /* If the reason for the wake up is that we are terminating, just break from the loop. */ - if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { - break; - } + if (pContext == NULL) { + return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice); + } + if (pDevice == NULL) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); + } + if (pConfig == NULL) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pConfig == NULL).", MA_INVALID_ARGS); + } - /* - Getting to this point means the device is wanting to get started. The function that has requested that the device - be started will be waiting on an event (pDevice->startEvent) which means we need to make sure we signal the event - in both the success and error case. It's important that the state of the device is set _before_ signaling the event. - */ - ma_assert(ma_device__get_state(pDevice) == MA_STATE_STARTING); + /* We need to make a copy of the config so we can set default values if they were left unset in the input config. */ + config = *pConfig; - /* Make sure the state is set appropriately. 
*/ - ma_device__set_state(pDevice, MA_STATE_STARTED); - ma_event_signal(&pDevice->startEvent); + /* Basic config validation. */ + if (config.deviceType != ma_device_type_playback && config.deviceType != ma_device_type_capture && config.deviceType != ma_device_type_duplex && config.deviceType != ma_device_type_loopback) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Device type is invalid. Make sure the device type has been set in the config.", MA_INVALID_DEVICE_CONFIG); + } - if (pDevice->pContext->onDeviceMainLoop != NULL) { - pDevice->pContext->onDeviceMainLoop(pDevice); - } else { - ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "No main loop implementation.", MA_API_NOT_FOUND); + if (config.deviceType == ma_device_type_capture || config.deviceType == ma_device_type_duplex) { + if (config.capture.channels > MA_MAX_CHANNELS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Capture channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG); } - - /* - Getting here means we have broken from the main loop which happens the application has requested that device be stopped. Note that this - may have actually already happened above if the device was lost and miniaudio has attempted to re-initialize the device. In this case we - don't want to be doing this a second time. - */ - if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) { - if (pDevice->pContext->onDeviceStop) { - pDevice->pContext->onDeviceStop(pDevice); - } + if (!ma__is_channel_map_valid(config.capture.channelMap, config.capture.channels)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Capture channel map is invalid.", MA_INVALID_DEVICE_CONFIG); } + } - /* After the device has stopped, make sure an event is posted. */ - onStop = pDevice->onStop; - if (onStop) { - onStop(pDevice); + if (config.deviceType == ma_device_type_playback || config.deviceType == ma_device_type_duplex || config.deviceType == ma_device_type_loopback) { + if (config.playback.channels > MA_MAX_CHANNELS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Playback channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG); } - - /* - A function somewhere is waiting for the device to have stopped for real so we need to signal an event to allow it to continue. Note that - it's possible that the device has been uninitialized which means we need to _not_ change the status to stopped. We cannot go from an - uninitialized state to stopped state. - */ - if (ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED) { - ma_device__set_state(pDevice, MA_STATE_STOPPED); - ma_event_signal(&pDevice->stopEvent); + if (!ma__is_channel_map_valid(config.playback.channelMap, config.playback.channels)) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Playback channel map is invalid.", MA_INVALID_DEVICE_CONFIG); } } - /* Make sure we aren't continuously waiting on a stop event. */ - ma_event_signal(&pDevice->stopEvent); /* <-- Is this still needed? */ -#ifdef MA_WIN32 - ma_CoUninitialize(pDevice->pContext); -#endif + MA_ZERO_OBJECT(pDevice); + pDevice->pContext = pContext; - return (ma_thread_result)0; -} + /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. 
*/ + pDevice->pUserData = config.pUserData; + pDevice->onData = config.dataCallback; + pDevice->onStop = config.stopCallback; + if (((ma_uintptr)pDevice % sizeof(pDevice)) != 0) { + if (pContext->logCallback) { + pContext->logCallback(pContext, pDevice, MA_LOG_LEVEL_WARNING, "WARNING: ma_device_init() called for a device that is not properly aligned. Thread safety is not supported."); + } + } -/* Helper for determining whether or not the given device is initialized. */ -ma_bool32 ma_device__is_initialized(ma_device* pDevice) -{ - if (pDevice == NULL) { - return MA_FALSE; + pDevice->noPreZeroedOutputBuffer = config.noPreZeroedOutputBuffer; + pDevice->noClip = config.noClip; + pDevice->masterVolumeFactor = 1; + + /* + When passing in 0 for the format/channels/rate/chmap it means the device will be using whatever is chosen by the backend. If everything is set + to defaults it means the format conversion pipeline will run on a fast path where data transfer is just passed straight through to the backend. + */ + if (config.sampleRate == 0) { + config.sampleRate = MA_DEFAULT_SAMPLE_RATE; + pDevice->usingDefaultSampleRate = MA_TRUE; } - return ma_device__get_state(pDevice) != MA_STATE_UNINITIALIZED; -} + if (config.capture.format == ma_format_unknown) { + config.capture.format = MA_DEFAULT_FORMAT; + pDevice->capture.usingDefaultFormat = MA_TRUE; + } + if (config.capture.channels == 0) { + config.capture.channels = MA_DEFAULT_CHANNELS; + pDevice->capture.usingDefaultChannels = MA_TRUE; + } + if (config.capture.channelMap[0] == MA_CHANNEL_NONE) { + pDevice->capture.usingDefaultChannelMap = MA_TRUE; + } + if (config.playback.format == ma_format_unknown) { + config.playback.format = MA_DEFAULT_FORMAT; + pDevice->playback.usingDefaultFormat = MA_TRUE; + } + if (config.playback.channels == 0) { + config.playback.channels = MA_DEFAULT_CHANNELS; + pDevice->playback.usingDefaultChannels = MA_TRUE; + } + if (config.playback.channelMap[0] == MA_CHANNEL_NONE) { + pDevice->playback.usingDefaultChannelMap = MA_TRUE; + } -#ifdef MA_WIN32 -ma_result ma_context_uninit_backend_apis__win32(ma_context* pContext) -{ - ma_CoUninitialize(pContext); - ma_dlclose(pContext, pContext->win32.hUser32DLL); - ma_dlclose(pContext, pContext->win32.hOle32DLL); - ma_dlclose(pContext, pContext->win32.hAdvapi32DLL); - return MA_SUCCESS; -} + /* Default buffer size. */ + if (config.bufferSizeInMilliseconds == 0 && config.bufferSizeInFrames == 0) { + config.bufferSizeInMilliseconds = (config.performanceProfile == ma_performance_profile_low_latency) ? MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY : MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE; + pDevice->usingDefaultBufferSize = MA_TRUE; + } -ma_result ma_context_init_backend_apis__win32(ma_context* pContext) -{ -#ifdef MA_WIN32_DESKTOP - /* Ole32.dll */ - pContext->win32.hOle32DLL = ma_dlopen(pContext, "ole32.dll"); - if (pContext->win32.hOle32DLL == NULL) { - return MA_FAILED_TO_INIT_BACKEND; + /* Default periods. 
*/ + if (config.periods == 0) { + config.periods = MA_DEFAULT_PERIODS; + pDevice->usingDefaultPeriods = MA_TRUE; } - pContext->win32.CoInitializeEx = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoInitializeEx"); - pContext->win32.CoUninitialize = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoUninitialize"); - pContext->win32.CoCreateInstance = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoCreateInstance"); - pContext->win32.CoTaskMemFree = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "CoTaskMemFree"); - pContext->win32.PropVariantClear = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "PropVariantClear"); - pContext->win32.StringFromGUID2 = (ma_proc)ma_dlsym(pContext, pContext->win32.hOle32DLL, "StringFromGUID2"); + /* + Must have at least 3 periods for full-duplex mode. The idea is that the playback and capture positions hang out in the middle period, with the surrounding + periods acting as a buffer in case the capture and playback devices get's slightly out of sync. + */ + if (config.deviceType == ma_device_type_duplex && config.periods < 3) { + config.periods = 3; + } - /* User32.dll */ - pContext->win32.hUser32DLL = ma_dlopen(pContext, "user32.dll"); - if (pContext->win32.hUser32DLL == NULL) { - return MA_FAILED_TO_INIT_BACKEND; - } + pDevice->type = config.deviceType; + pDevice->sampleRate = config.sampleRate; + pDevice->resampling.algorithm = config.resampling.algorithm; + pDevice->resampling.linear.lpfCount = config.resampling.linear.lpfCount; + pDevice->resampling.speex.quality = config.resampling.speex.quality; - pContext->win32.GetForegroundWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetForegroundWindow"); - pContext->win32.GetDesktopWindow = (ma_proc)ma_dlsym(pContext, pContext->win32.hUser32DLL, "GetDesktopWindow"); + pDevice->capture.shareMode = config.capture.shareMode; + pDevice->capture.format = config.capture.format; + pDevice->capture.channels = config.capture.channels; + ma_channel_map_copy(pDevice->capture.channelMap, config.capture.channelMap, config.capture.channels); + + pDevice->playback.shareMode = config.playback.shareMode; + pDevice->playback.format = config.playback.format; + pDevice->playback.channels = config.playback.channels; + ma_channel_map_copy(pDevice->playback.channelMap, config.playback.channelMap, config.playback.channels); - /* Advapi32.dll */ - pContext->win32.hAdvapi32DLL = ma_dlopen(pContext, "advapi32.dll"); - if (pContext->win32.hAdvapi32DLL == NULL) { - return MA_FAILED_TO_INIT_BACKEND; + /* The internal format, channel count and sample rate can be modified by the backend. 
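A sketch of how an application drives the defaulting logic above: leaving format/channels/rate at zero opts into whatever the backend chooses, while setting them explicitly engages the conversion pipeline. The names data_callback and init_playback_device are placeholders, and ma_device_config_init()/ma_format_f32 are taken from the public miniaudio API rather than from this hunk:

    /* Illustrative only: a playback device configured with an explicit format, channel count and sample rate. */
    static void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount)
    {
        /* miniaudio pre-zeroes pOutput unless noPreZeroedOutputBuffer is set, so an empty callback plays silence. */
        (void)pDevice; (void)pOutput; (void)pInput; (void)frameCount;
    }

    static ma_result init_playback_device(ma_device* pDevice)
    {
        ma_device_config config = ma_device_config_init(ma_device_type_playback);
        config.playback.format   = ma_format_f32;  /* ma_format_unknown (0) would fall back to the backend's native format. */
        config.playback.channels = 2;              /* 0 would fall back to MA_DEFAULT_CHANNELS. */
        config.sampleRate        = 48000;          /* 0 would fall back to MA_DEFAULT_SAMPLE_RATE. */
        config.dataCallback      = data_callback;

        /* Passing NULL for the context routes through ma_device_init_ex(), so the device owns its own context. */
        return ma_device_init(NULL, &config, pDevice);
    }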
*/ + pDevice->capture.internalFormat = pDevice->capture.format; + pDevice->capture.internalChannels = pDevice->capture.channels; + pDevice->capture.internalSampleRate = pDevice->sampleRate; + ma_channel_map_copy(pDevice->capture.internalChannelMap, pDevice->capture.channelMap, pDevice->capture.channels); + + pDevice->playback.internalFormat = pDevice->playback.format; + pDevice->playback.internalChannels = pDevice->playback.channels; + pDevice->playback.internalSampleRate = pDevice->sampleRate; + ma_channel_map_copy(pDevice->playback.internalChannelMap, pDevice->playback.channelMap, pDevice->playback.channels); + + + if (ma_mutex_init(pContext, &pDevice->lock) != MA_SUCCESS) { + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create mutex.", MA_FAILED_TO_CREATE_MUTEX); } - pContext->win32.RegOpenKeyExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegOpenKeyExA"); - pContext->win32.RegCloseKey = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegCloseKey"); - pContext->win32.RegQueryValueExA = (ma_proc)ma_dlsym(pContext, pContext->win32.hAdvapi32DLL, "RegQueryValueExA"); -#endif + /* + When the device is started, the worker thread is the one that does the actual startup of the backend device. We + use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device. + + Each of these semaphores is released internally by the worker thread when the work is completed. The start + semaphore is also used to wake up the worker thread. + */ + if (ma_event_init(pContext, &pDevice->wakeupEvent) != MA_SUCCESS) { + ma_mutex_uninit(&pDevice->lock); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread wakeup event.", MA_FAILED_TO_CREATE_EVENT); + } + if (ma_event_init(pContext, &pDevice->startEvent) != MA_SUCCESS) { + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->lock); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread start event.", MA_FAILED_TO_CREATE_EVENT); + } + if (ma_event_init(pContext, &pDevice->stopEvent) != MA_SUCCESS) { + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->lock); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread stop event.", MA_FAILED_TO_CREATE_EVENT); + } - ma_CoInitializeEx(pContext, NULL, MA_COINIT_VALUE); - return MA_SUCCESS; -} -#else -ma_result ma_context_uninit_backend_apis__nix(ma_context* pContext) -{ -#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING) - ma_dlclose(pContext, pContext->posix.pthreadSO); -#else - (void)pContext; -#endif - return MA_SUCCESS; -} + result = pContext->onDeviceInit(pContext, &config, pDevice); + if (result != MA_SUCCESS) { + return result; + } -ma_result ma_context_init_backend_apis__nix(ma_context* pContext) -{ - /* pthread */ -#if defined(MA_USE_RUNTIME_LINKING_FOR_PTHREAD) && !defined(MA_NO_RUNTIME_LINKING) - const char* libpthreadFileNames[] = { - "libpthread.so", - "libpthread.so.0", - "libpthread.dylib" - }; - size_t i; + ma_device__post_init_setup(pDevice, pConfig->deviceType); - for (i = 0; i < sizeof(libpthreadFileNames) / sizeof(libpthreadFileNames[0]); ++i) { - pContext->posix.pthreadSO = ma_dlopen(pContext, libpthreadFileNames[i]); - if (pContext->posix.pthreadSO != NULL) { - break; + + /* If the backend did not fill out a name for the device, try a generic method. 
*/ + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + if (pDevice->capture.name[0] == '\0') { + if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_capture, config.capture.pDeviceID, pDevice->capture.name, sizeof(pDevice->capture.name)) != MA_SUCCESS) { + ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), (config.capture.pDeviceID == NULL) ? MA_DEFAULT_CAPTURE_DEVICE_NAME : "Capture Device", (size_t)-1); + } } } - - if (pContext->posix.pthreadSO == NULL) { - return MA_FAILED_TO_INIT_BACKEND; + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { + if (pDevice->playback.name[0] == '\0') { + if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_playback, config.playback.pDeviceID, pDevice->playback.name, sizeof(pDevice->playback.name)) != MA_SUCCESS) { + ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), (config.playback.pDeviceID == NULL) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : "Playback Device", (size_t)-1); + } + } } - pContext->posix.pthread_create = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_create"); - pContext->posix.pthread_join = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_join"); - pContext->posix.pthread_mutex_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_init"); - pContext->posix.pthread_mutex_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_destroy"); - pContext->posix.pthread_mutex_lock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_lock"); - pContext->posix.pthread_mutex_unlock = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_mutex_unlock"); - pContext->posix.pthread_cond_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_init"); - pContext->posix.pthread_cond_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_destroy"); - pContext->posix.pthread_cond_wait = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_wait"); - pContext->posix.pthread_cond_signal = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_cond_signal"); - pContext->posix.pthread_attr_init = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_init"); - pContext->posix.pthread_attr_destroy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_destroy"); - pContext->posix.pthread_attr_setschedpolicy = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedpolicy"); - pContext->posix.pthread_attr_getschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_getschedparam"); - pContext->posix.pthread_attr_setschedparam = (ma_proc)ma_dlsym(pContext, pContext->posix.pthreadSO, "pthread_attr_setschedparam"); -#else - pContext->posix.pthread_create = (ma_proc)pthread_create; - pContext->posix.pthread_join = (ma_proc)pthread_join; - pContext->posix.pthread_mutex_init = (ma_proc)pthread_mutex_init; - pContext->posix.pthread_mutex_destroy = (ma_proc)pthread_mutex_destroy; - pContext->posix.pthread_mutex_lock = (ma_proc)pthread_mutex_lock; - pContext->posix.pthread_mutex_unlock = (ma_proc)pthread_mutex_unlock; - pContext->posix.pthread_cond_init = (ma_proc)pthread_cond_init; - pContext->posix.pthread_cond_destroy = (ma_proc)pthread_cond_destroy; - pContext->posix.pthread_cond_wait = (ma_proc)pthread_cond_wait; - 
pContext->posix.pthread_cond_signal = (ma_proc)pthread_cond_signal; - pContext->posix.pthread_attr_init = (ma_proc)pthread_attr_init; - pContext->posix.pthread_attr_destroy = (ma_proc)pthread_attr_destroy; -#if !defined(__EMSCRIPTEN__) - pContext->posix.pthread_attr_setschedpolicy = (ma_proc)pthread_attr_setschedpolicy; - pContext->posix.pthread_attr_getschedparam = (ma_proc)pthread_attr_getschedparam; - pContext->posix.pthread_attr_setschedparam = (ma_proc)pthread_attr_setschedparam; -#endif -#endif - - return MA_SUCCESS; -} -#endif -ma_result ma_context_init_backend_apis(ma_context* pContext) -{ - ma_result result; -#ifdef MA_WIN32 - result = ma_context_init_backend_apis__win32(pContext); -#else - result = ma_context_init_backend_apis__nix(pContext); -#endif + /* Some backends don't require the worker thread. */ + if (!ma_context_is_backend_asynchronous(pContext)) { + /* The worker thread. */ + if (ma_thread_create(pContext, &pDevice->thread, ma_worker_thread, pDevice) != MA_SUCCESS) { + ma_device_uninit(pDevice); + return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread.", MA_FAILED_TO_CREATE_THREAD); + } - return result; -} + /* Wait for the worker thread to put the device into it's stopped state for real. */ + ma_event_wait(&pDevice->stopEvent); + } else { + ma_device__set_state(pDevice, MA_STATE_STOPPED); + } -ma_result ma_context_uninit_backend_apis(ma_context* pContext) -{ - ma_result result; -#ifdef MA_WIN32 - result = ma_context_uninit_backend_apis__win32(pContext); -#else - result = ma_context_uninit_backend_apis__nix(pContext); -#endif - return result; -} +#ifdef MA_DEBUG_OUTPUT + printf("[%s]\n", ma_get_backend_name(pDevice->pContext->backend)); + if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { + printf(" %s (%s)\n", pDevice->capture.name, "Capture"); + printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->capture.format), ma_get_format_name(pDevice->capture.internalFormat)); + printf(" Channels: %d -> %d\n", pDevice->capture.channels, pDevice->capture.internalChannels); + printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->capture.internalSampleRate); + printf(" Buffer Size: %d/%d (%d)\n", pDevice->capture.internalBufferSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods)); + printf(" Conversion:\n"); + printf(" Pre Format Conversion: %s\n", pDevice->capture.converter.hasPreFormatConversion ? "YES" : "NO"); + printf(" Post Format Conversion: %s\n", pDevice->capture.converter.hasPostFormatConversion ? "YES" : "NO"); + printf(" Channel Routing: %s\n", pDevice->capture.converter.hasChannelConverter ? "YES" : "NO"); + printf(" Resampling: %s\n", pDevice->capture.converter.hasResampler ? "YES" : "NO"); + printf(" Passthrough: %s\n", pDevice->capture.converter.isPassthrough ? 
"YES" : "NO"); + } + if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { + printf(" %s (%s)\n", pDevice->playback.name, "Playback"); + printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat)); + printf(" Channels: %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels); + printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate); + printf(" Buffer Size: %d/%d (%d)\n", pDevice->playback.internalBufferSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods)); + printf(" Conversion:\n"); + printf(" Pre Format Conversion: %s\n", pDevice->playback.converter.hasPreFormatConversion ? "YES" : "NO"); + printf(" Post Format Conversion: %s\n", pDevice->playback.converter.hasPostFormatConversion ? "YES" : "NO"); + printf(" Channel Routing: %s\n", pDevice->playback.converter.hasChannelConverter ? "YES" : "NO"); + printf(" Resampling: %s\n", pDevice->playback.converter.hasResampler ? "YES" : "NO"); + printf(" Passthrough: %s\n", pDevice->playback.converter.isPassthrough ? "YES" : "NO"); + } +#endif -ma_bool32 ma_context_is_backend_asynchronous(ma_context* pContext) -{ - return pContext->isBackendAsynchronous; + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STOPPED); + return MA_SUCCESS; } -ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pConfig, ma_context* pContext) +ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice) { ma_result result; - ma_context_config config; + ma_context* pContext; ma_backend defaultBackends[ma_backend_null+1]; ma_uint32 iBackend; ma_backend* pBackendsToIterate; ma_uint32 backendsToIterateCount; + ma_allocation_callbacks allocationCallbacks; - if (pContext == NULL) { + if (pConfig == NULL) { return MA_INVALID_ARGS; } - ma_zero_object(pContext); - - /* Always make sure the config is set first to ensure properties are available as soon as possible. */ - if (pConfig != NULL) { - config = *pConfig; + if (pContextConfig != NULL) { + result = ma_allocation_callbacks_init_copy(&allocationCallbacks, &pContextConfig->allocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } } else { - config = ma_context_config_init(); + allocationCallbacks = ma_allocation_callbacks_init_default(); } + - pContext->logCallback = config.logCallback; - pContext->threadPriority = config.threadPriority; - pContext->pUserData = config.pUserData; - - /* Backend APIs need to be initialized first. This is where external libraries will be loaded and linked. 
*/ - result = ma_context_init_backend_apis(pContext); - if (result != MA_SUCCESS) { - return result; + pContext = (ma_context*)ma__malloc_from_callbacks(sizeof(*pContext), &allocationCallbacks); + if (pContext == NULL) { + return MA_OUT_OF_MEMORY; } for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { @@ -25875,6050 +27248,5519 @@ ma_result ma_context_init(const ma_backend backends[], ma_uint32 backendCount, c backendsToIterateCount = ma_countof(defaultBackends); } - ma_assert(pBackendsToIterate != NULL); + result = MA_NO_BACKEND; for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { - ma_backend backend = pBackendsToIterate[iBackend]; - - result = MA_NO_BACKEND; - switch (backend) { - #ifdef MA_HAS_WASAPI - case ma_backend_wasapi: - { - result = ma_context_init__wasapi(&config, pContext); - } break; - #endif - #ifdef MA_HAS_DSOUND - case ma_backend_dsound: - { - result = ma_context_init__dsound(&config, pContext); - } break; - #endif - #ifdef MA_HAS_WINMM - case ma_backend_winmm: - { - result = ma_context_init__winmm(&config, pContext); - } break; - #endif - #ifdef MA_HAS_ALSA - case ma_backend_alsa: - { - result = ma_context_init__alsa(&config, pContext); - } break; - #endif - #ifdef MA_HAS_PULSEAUDIO - case ma_backend_pulseaudio: - { - result = ma_context_init__pulse(&config, pContext); - } break; - #endif - #ifdef MA_HAS_JACK - case ma_backend_jack: - { - result = ma_context_init__jack(&config, pContext); - } break; - #endif - #ifdef MA_HAS_COREAUDIO - case ma_backend_coreaudio: - { - result = ma_context_init__coreaudio(&config, pContext); - } break; - #endif - #ifdef MA_HAS_SNDIO - case ma_backend_sndio: - { - result = ma_context_init__sndio(&config, pContext); - } break; - #endif - #ifdef MA_HAS_AUDIO4 - case ma_backend_audio4: - { - result = ma_context_init__audio4(&config, pContext); - } break; - #endif - #ifdef MA_HAS_OSS - case ma_backend_oss: - { - result = ma_context_init__oss(&config, pContext); - } break; - #endif - #ifdef MA_HAS_AAUDIO - case ma_backend_aaudio: - { - result = ma_context_init__aaudio(&config, pContext); - } break; - #endif - #ifdef MA_HAS_OPENSL - case ma_backend_opensl: - { - result = ma_context_init__opensl(&config, pContext); - } break; - #endif - #ifdef MA_HAS_WEBAUDIO - case ma_backend_webaudio: - { - result = ma_context_init__webaudio(&config, pContext); - } break; - #endif - #ifdef MA_HAS_NULL - case ma_backend_null: - { - result = ma_context_init__null(&config, pContext); - } break; - #endif - - default: break; - } - - /* If this iteration was successful, return. */ + result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext); if (result == MA_SUCCESS) { - result = ma_mutex_init(pContext, &pContext->deviceEnumLock); - if (result != MA_SUCCESS) { - ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device enumeration. ma_context_get_devices() is not thread safe.", MA_FAILED_TO_CREATE_MUTEX); - } - result = ma_mutex_init(pContext, &pContext->deviceInfoLock); - if (result != MA_SUCCESS) { - ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. ma_context_get_device_info() is not thread safe.", MA_FAILED_TO_CREATE_MUTEX); + result = ma_device_init(pContext, pConfig, pDevice); + if (result == MA_SUCCESS) { + break; /* Success. */ + } else { + ma_context_uninit(pContext); /* Failure. */ } - -#ifdef MA_DEBUG_OUTPUT - printf("[miniaudio] Endian: %s\n", ma_is_little_endian() ? 
"LE" : "BE"); - printf("[miniaudio] SSE2: %s\n", ma_has_sse2() ? "YES" : "NO"); - printf("[miniaudio] AVX2: %s\n", ma_has_avx2() ? "YES" : "NO"); - printf("[miniaudio] AVX512F: %s\n", ma_has_avx512f() ? "YES" : "NO"); - printf("[miniaudio] NEON: %s\n", ma_has_neon() ? "YES" : "NO"); -#endif - - pContext->backend = backend; - return result; } } - /* If we get here it means an error occurred. */ - ma_zero_object(pContext); /* Safety. */ - return MA_NO_BACKEND; -} - -ma_result ma_context_uninit(ma_context* pContext) -{ - if (pContext == NULL) { - return MA_INVALID_ARGS; + if (result != MA_SUCCESS) { + ma__free_from_callbacks(pContext, &allocationCallbacks); + return result; } - pContext->onUninit(pContext); - - ma_mutex_uninit(&pContext->deviceEnumLock); - ma_mutex_uninit(&pContext->deviceInfoLock); - ma_free(pContext->pDeviceInfos); - ma_context_uninit_backend_apis(pContext); - - return MA_SUCCESS; + pDevice->isOwnerOfContext = MA_TRUE; + return result; } - -ma_result ma_context_enumerate_devices(ma_context* pContext, ma_enum_devices_callback_proc callback, void* pUserData) +void ma_device_uninit(ma_device* pDevice) { - ma_result result; - - if (pContext == NULL || pContext->onEnumDevices == NULL || callback == NULL) { - return MA_INVALID_ARGS; + if (!ma_device__is_initialized(pDevice)) { + return; } - ma_mutex_lock(&pContext->deviceEnumLock); - { - result = pContext->onEnumDevices(pContext, callback, pUserData); + /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. */ + if (ma_device_is_started(pDevice)) { + ma_device_stop(pDevice); } - ma_mutex_unlock(&pContext->deviceEnumLock); - return result; -} + /* Putting the device into an uninitialized state will make the worker thread return. */ + ma_device__set_state(pDevice, MA_STATE_UNINITIALIZED); + /* Wake up the worker thread and wait for it to properly terminate. */ + if (!ma_context_is_backend_asynchronous(pDevice->pContext)) { + ma_event_signal(&pDevice->wakeupEvent); + ma_thread_wait(&pDevice->thread); + } -ma_bool32 ma_context_get_devices__enum_callback(ma_context* pContext, ma_device_type deviceType, const ma_device_info* pInfo, void* pUserData) -{ - /* - We need to insert the device info into our main internal buffer. Where it goes depends on the device type. If it's a capture device - it's just appended to the end. If it's a playback device it's inserted just before the first capture device. - */ + pDevice->pContext->onDeviceUninit(pDevice); - /* - First make sure we have room. Since the number of devices we add to the list is usually relatively small I've decided to use a - simple fixed size increment for buffer expansion. - */ - const ma_uint32 bufferExpansionCount = 2; - const ma_uint32 totalDeviceInfoCount = pContext->playbackDeviceInfoCount + pContext->captureDeviceInfoCount; + ma_event_uninit(&pDevice->stopEvent); + ma_event_uninit(&pDevice->startEvent); + ma_event_uninit(&pDevice->wakeupEvent); + ma_mutex_uninit(&pDevice->lock); - if (pContext->deviceInfoCapacity >= totalDeviceInfoCount) { - ma_uint32 newCapacity = totalDeviceInfoCount + bufferExpansionCount; - ma_device_info* pNewInfos = (ma_device_info*)ma_realloc(pContext->pDeviceInfos, sizeof(*pContext->pDeviceInfos)*newCapacity); - if (pNewInfos == NULL) { - return MA_FALSE; /* Out of memory. 
*/ - } + if (pDevice->isOwnerOfContext) { + ma_allocation_callbacks allocationCallbacks = pDevice->pContext->allocationCallbacks; - pContext->pDeviceInfos = pNewInfos; - pContext->deviceInfoCapacity = newCapacity; + ma_context_uninit(pDevice->pContext); + ma__free_from_callbacks(pDevice->pContext, &allocationCallbacks); } - if (deviceType == ma_device_type_playback) { - /* Playback. Insert just before the first capture device. */ - - /* The first thing to do is move all of the capture devices down a slot. */ - ma_uint32 iFirstCaptureDevice = pContext->playbackDeviceInfoCount; - size_t iCaptureDevice; - for (iCaptureDevice = totalDeviceInfoCount; iCaptureDevice > iFirstCaptureDevice; --iCaptureDevice) { - pContext->pDeviceInfos[iCaptureDevice] = pContext->pDeviceInfos[iCaptureDevice-1]; - } + MA_ZERO_OBJECT(pDevice); +} - /* Now just insert where the first capture device was before moving it down a slot. */ - pContext->pDeviceInfos[iFirstCaptureDevice] = *pInfo; - pContext->playbackDeviceInfoCount += 1; - } else { - /* Capture. Insert at the end. */ - pContext->pDeviceInfos[totalDeviceInfoCount] = *pInfo; - pContext->captureDeviceInfoCount += 1; +void ma_device_set_stop_callback(ma_device* pDevice, ma_stop_proc proc) +{ + if (pDevice == NULL) { + return; } - (void)pUserData; - return MA_TRUE; + ma_atomic_exchange_ptr(&pDevice->onStop, proc); } -ma_result ma_context_get_devices(ma_context* pContext, ma_device_info** ppPlaybackDeviceInfos, ma_uint32* pPlaybackDeviceCount, ma_device_info** ppCaptureDeviceInfos, ma_uint32* pCaptureDeviceCount) +ma_result ma_device_start(ma_device* pDevice) { ma_result result; - /* Safety. */ - if (ppPlaybackDeviceInfos != NULL) *ppPlaybackDeviceInfos = NULL; - if (pPlaybackDeviceCount != NULL) *pPlaybackDeviceCount = 0; - if (ppCaptureDeviceInfos != NULL) *ppCaptureDeviceInfos = NULL; - if (pCaptureDeviceCount != NULL) *pCaptureDeviceCount = 0; + if (pDevice == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); + } - if (pContext == NULL || pContext->onEnumDevices == NULL) { - return MA_INVALID_ARGS; + if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED); } - /* Note that we don't use ma_context_enumerate_devices() here because we want to do locking at a higher level. */ - ma_mutex_lock(&pContext->deviceEnumLock); + if (ma_device__get_state(pDevice) == MA_STATE_STARTED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_start() called when the device is already started.", MA_INVALID_OPERATION); /* Already started. Returning an error to let the application know because it probably means they're doing something wrong. */ + } + + result = MA_ERROR; + ma_mutex_lock(&pDevice->lock); { - /* Reset everything first. */ - pContext->playbackDeviceInfoCount = 0; - pContext->captureDeviceInfoCount = 0; + /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */ + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STOPPED); - /* Now enumerate over available devices. */ - result = pContext->onEnumDevices(pContext, ma_context_get_devices__enum_callback, NULL); - if (result == MA_SUCCESS) { - /* Playback devices. 
*/ - if (ppPlaybackDeviceInfos != NULL) { - *ppPlaybackDeviceInfos = pContext->pDeviceInfos; - } - if (pPlaybackDeviceCount != NULL) { - *pPlaybackDeviceCount = pContext->playbackDeviceInfoCount; - } + ma_device__set_state(pDevice, MA_STATE_STARTING); - /* Capture devices. */ - if (ppCaptureDeviceInfos != NULL) { - *ppCaptureDeviceInfos = pContext->pDeviceInfos + pContext->playbackDeviceInfoCount; /* Capture devices come after playback devices. */ - } - if (pCaptureDeviceCount != NULL) { - *pCaptureDeviceCount = pContext->captureDeviceInfoCount; + /* Asynchronous backends need to be handled differently. */ + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + result = pDevice->pContext->onDeviceStart(pDevice); + if (result == MA_SUCCESS) { + ma_device__set_state(pDevice, MA_STATE_STARTED); } + } else { + /* + Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the + thread and then wait for the start event. + */ + ma_event_signal(&pDevice->wakeupEvent); + + /* + Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device + into the started state. Don't call ma_device__set_state() here. + */ + ma_event_wait(&pDevice->startEvent); + result = pDevice->workResult; } } - ma_mutex_unlock(&pContext->deviceEnumLock); + ma_mutex_unlock(&pDevice->lock); return result; } -ma_result ma_context_get_device_info(ma_context* pContext, ma_device_type deviceType, const ma_device_id* pDeviceID, ma_share_mode shareMode, ma_device_info* pDeviceInfo) +ma_result ma_device_stop(ma_device* pDevice) { - ma_device_info deviceInfo; + ma_result result; - /* NOTE: Do not clear pDeviceInfo on entry. The reason is the pDeviceID may actually point to pDeviceInfo->id which will break things. */ - if (pContext == NULL || pDeviceInfo == NULL) { - return MA_INVALID_ARGS; + if (pDevice == NULL) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); } - ma_zero_object(&deviceInfo); + if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED); + } - /* Help the backend out by copying over the device ID if we have one. */ - if (pDeviceID != NULL) { - ma_copy_memory(&deviceInfo.id, pDeviceID, sizeof(*pDeviceID)); + if (ma_device__get_state(pDevice) == MA_STATE_STOPPED) { + return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_stop() called when the device is already stopped.", MA_INVALID_OPERATION); /* Already stopped. Returning an error to let the application know because it probably means they're doing something wrong. */ } - /* The backend may have an optimized device info retrieval function. If so, try that first. */ - if (pContext->onGetDeviceInfo != NULL) { - ma_result result; - ma_mutex_lock(&pContext->deviceInfoLock); - { - result = pContext->onGetDeviceInfo(pContext, deviceType, pDeviceID, shareMode, &deviceInfo); + result = MA_ERROR; + ma_mutex_lock(&pDevice->lock); + { + /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */ + MA_ASSERT(ma_device__get_state(pDevice) == MA_STATE_STARTED); + + ma_device__set_state(pDevice, MA_STATE_STOPPING); + + /* There's no need to wake up the thread like we do when starting. 
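+           While the device is running the worker thread is not blocked on the wakeup event; it is inside its main audio loop, so for synchronous backends
+           it only needs to notice the MA_STATE_STOPPING state change and then signal the stop event that is waited on further below.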
*/ + + if (pDevice->pContext->onDeviceStop) { + result = pDevice->pContext->onDeviceStop(pDevice); + } else { + result = MA_SUCCESS; } - ma_mutex_unlock(&pContext->deviceInfoLock); - /* Clamp ranges. */ - deviceInfo.minChannels = ma_max(deviceInfo.minChannels, MA_MIN_CHANNELS); - deviceInfo.maxChannels = ma_min(deviceInfo.maxChannels, MA_MAX_CHANNELS); - deviceInfo.minSampleRate = ma_max(deviceInfo.minSampleRate, MA_MIN_SAMPLE_RATE); - deviceInfo.maxSampleRate = ma_min(deviceInfo.maxSampleRate, MA_MAX_SAMPLE_RATE); + /* Asynchronous backends need to be handled differently. */ + if (ma_context_is_backend_asynchronous(pDevice->pContext)) { + ma_device__set_state(pDevice, MA_STATE_STOPPED); + } else { + /* Synchronous backends. */ - *pDeviceInfo = deviceInfo; - return result; + /* + We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be + the one who puts the device into the stopped state. Don't call ma_device__set_state() here. + */ + ma_event_wait(&pDevice->stopEvent); + result = MA_SUCCESS; + } } + ma_mutex_unlock(&pDevice->lock); - /* Getting here means onGetDeviceInfo has not been set. */ - return MA_ERROR; + return result; } -ma_bool32 ma_context_is_loopback_supported(ma_context* pContext) +ma_bool32 ma_device_is_started(ma_device* pDevice) { - if (pContext == NULL) { + if (pDevice == NULL) { return MA_FALSE; } - return ma_is_loopback_supported(pContext->backend); + return ma_device__get_state(pDevice) == MA_STATE_STARTED; } - -ma_result ma_device_init(ma_context* pContext, const ma_device_config* pConfig, ma_device* pDevice) +ma_result ma_device_set_master_volume(ma_device* pDevice, float volume) { - ma_result result; - ma_device_config config; - - if (pContext == NULL) { - return ma_device_init_ex(NULL, 0, NULL, pConfig, pDevice); - } if (pDevice == NULL) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); - } - if (pConfig == NULL) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid arguments (pConfig == NULL).", MA_INVALID_ARGS); - } - - /* We need to make a copy of the config so we can set default values if they were left unset in the input config. */ - config = *pConfig; - - /* Basic config validation. */ - if (config.deviceType != ma_device_type_playback && config.deviceType != ma_device_type_capture && config.deviceType != ma_device_type_duplex && config.deviceType != ma_device_type_loopback) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Device type is invalid. Make sure the device type has been set in the config.", MA_INVALID_DEVICE_CONFIG); - } - - if (config.deviceType == ma_device_type_capture || config.deviceType == ma_device_type_duplex) { - if (config.capture.channels > MA_MAX_CHANNELS) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Capture channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG); - } - if (!ma__is_channel_map_valid(config.capture.channelMap, config.capture.channels)) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. 
Capture channel map is invalid.", MA_INVALID_DEVICE_CONFIG); - } + return MA_INVALID_ARGS; } - if (config.deviceType == ma_device_type_playback || config.deviceType == ma_device_type_duplex || config.deviceType == ma_device_type_loopback) { - if (config.playback.channels > MA_MAX_CHANNELS) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with an invalid config. Playback channel count cannot exceed 32.", MA_INVALID_DEVICE_CONFIG); - } - if (!ma__is_channel_map_valid(config.playback.channelMap, config.playback.channels)) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "ma_device_init() called with invalid config. Playback channel map is invalid.", MA_INVALID_DEVICE_CONFIG); - } + if (volume < 0.0f || volume > 1.0f) { + return MA_INVALID_ARGS; } + pDevice->masterVolumeFactor = volume; - ma_zero_object(pDevice); - pDevice->pContext = pContext; - - /* Set the user data and log callback ASAP to ensure it is available for the entire initialization process. */ - pDevice->pUserData = config.pUserData; - pDevice->onData = config.dataCallback; - pDevice->onStop = config.stopCallback; + return MA_SUCCESS; +} - if (((ma_uintptr)pDevice % sizeof(pDevice)) != 0) { - if (pContext->logCallback) { - pContext->logCallback(pContext, pDevice, MA_LOG_LEVEL_WARNING, "WARNING: ma_device_init() called for a device that is not properly aligned. Thread safety is not supported."); - } +ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume) +{ + if (pDevice == NULL || pVolume == NULL) { + return MA_INVALID_ARGS; } - pDevice->noPreZeroedOutputBuffer = config.noPreZeroedOutputBuffer; - pDevice->noClip = config.noClip; - pDevice->masterVolumeFactor = 1; - - /* - When passing in 0 for the format/channels/rate/chmap it means the device will be using whatever is chosen by the backend. If everything is set - to defaults it means the format conversion pipeline will run on a fast path where data transfer is just passed straight through to the backend. - */ - if (config.sampleRate == 0) { - config.sampleRate = MA_DEFAULT_SAMPLE_RATE; - pDevice->usingDefaultSampleRate = MA_TRUE; - } + *pVolume = pDevice->masterVolumeFactor; - if (config.capture.format == ma_format_unknown) { - config.capture.format = MA_DEFAULT_FORMAT; - pDevice->capture.usingDefaultFormat = MA_TRUE; - } - if (config.capture.channels == 0) { - config.capture.channels = MA_DEFAULT_CHANNELS; - pDevice->capture.usingDefaultChannels = MA_TRUE; - } - if (config.capture.channelMap[0] == MA_CHANNEL_NONE) { - pDevice->capture.usingDefaultChannelMap = MA_TRUE; - } + return MA_SUCCESS; +} - if (config.playback.format == ma_format_unknown) { - config.playback.format = MA_DEFAULT_FORMAT; - pDevice->playback.usingDefaultFormat = MA_TRUE; - } - if (config.playback.channels == 0) { - config.playback.channels = MA_DEFAULT_CHANNELS; - pDevice->playback.usingDefaultChannels = MA_TRUE; - } - if (config.playback.channelMap[0] == MA_CHANNEL_NONE) { - pDevice->playback.usingDefaultChannelMap = MA_TRUE; +ma_result ma_device_set_master_gain_db(ma_device* pDevice, float gainDB) +{ + if (gainDB > 0) { + return MA_INVALID_ARGS; } + return ma_device_set_master_volume(pDevice, ma_gain_db_to_factor(gainDB)); +} - /* Default buffer size. */ - if (config.bufferSizeInMilliseconds == 0 && config.bufferSizeInFrames == 0) { - config.bufferSizeInMilliseconds = (config.performanceProfile == ma_performance_profile_low_latency) ? 
MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_LOW_LATENCY : MA_BASE_BUFFER_SIZE_IN_MILLISECONDS_CONSERVATIVE; - pDevice->usingDefaultBufferSize = MA_TRUE; - } +ma_result ma_device_get_master_gain_db(ma_device* pDevice, float* pGainDB) +{ + float factor; + ma_result result; - /* Default periods. */ - if (config.periods == 0) { - config.periods = MA_DEFAULT_PERIODS; - pDevice->usingDefaultPeriods = MA_TRUE; + if (pGainDB == NULL) { + return MA_INVALID_ARGS; } - /* - Must have at least 3 periods for full-duplex mode. The idea is that the playback and capture positions hang out in the middle period, with the surrounding - periods acting as a buffer in case the capture and playback devices get's slightly out of sync. - */ - if (config.deviceType == ma_device_type_duplex && config.periods < 3) { - config.periods = 3; + result = ma_device_get_master_volume(pDevice, &factor); + if (result != MA_SUCCESS) { + return result; } + *pGainDB = ma_factor_to_gain_db(factor); - pDevice->type = config.deviceType; - pDevice->sampleRate = config.sampleRate; + return MA_SUCCESS; +} - pDevice->capture.shareMode = config.capture.shareMode; - pDevice->capture.format = config.capture.format; - pDevice->capture.channels = config.capture.channels; - ma_channel_map_copy(pDevice->capture.channelMap, config.capture.channelMap, config.capture.channels); - pDevice->playback.shareMode = config.playback.shareMode; - pDevice->playback.format = config.playback.format; - pDevice->playback.channels = config.playback.channels; - ma_channel_map_copy(pDevice->playback.channelMap, config.playback.channelMap, config.playback.channels); +ma_context_config ma_context_config_init() +{ + ma_context_config config; + MA_ZERO_OBJECT(&config); + return config; +} - /* The internal format, channel count and sample rate can be modified by the backend. */ - pDevice->capture.internalFormat = pDevice->capture.format; - pDevice->capture.internalChannels = pDevice->capture.channels; - pDevice->capture.internalSampleRate = pDevice->sampleRate; - ma_channel_map_copy(pDevice->capture.internalChannelMap, pDevice->capture.channelMap, pDevice->capture.channels); +ma_device_config ma_device_config_init(ma_device_type deviceType) +{ + ma_device_config config; + MA_ZERO_OBJECT(&config); + config.deviceType = deviceType; - pDevice->playback.internalFormat = pDevice->playback.format; - pDevice->playback.internalChannels = pDevice->playback.channels; - pDevice->playback.internalSampleRate = pDevice->sampleRate; - ma_channel_map_copy(pDevice->playback.internalChannelMap, pDevice->playback.channelMap, pDevice->playback.channels); - + /* Resampling defaults. We must never use the Speex backend by default because it uses licensed third party code. */ + config.resampling.algorithm = ma_resample_algorithm_linear; + config.resampling.linear.lpfCount = ma_min(2, MA_MAX_RESAMPLER_LPF_FILTERS); + config.resampling.speex.quality = 3; - if (ma_mutex_init(pContext, &pDevice->lock) != MA_SUCCESS) { - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create mutex.", MA_FAILED_TO_CREATE_MUTEX); - } + return config; +} +#endif /* MA_NO_DEVICE_IO */ - /* - When the device is started, the worker thread is the one that does the actual startup of the backend device. We - use a semaphore to wait for the background thread to finish the work. The same applies for stopping the device. - - Each of these semaphores is released internally by the worker thread when the work is completed. The start - semaphore is also used to wake up the worker thread. 
- */ - if (ma_event_init(pContext, &pDevice->wakeupEvent) != MA_SUCCESS) { - ma_mutex_uninit(&pDevice->lock); - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread wakeup event.", MA_FAILED_TO_CREATE_EVENT); - } - if (ma_event_init(pContext, &pDevice->startEvent) != MA_SUCCESS) { - ma_event_uninit(&pDevice->wakeupEvent); - ma_mutex_uninit(&pDevice->lock); - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread start event.", MA_FAILED_TO_CREATE_EVENT); - } - if (ma_event_init(pContext, &pDevice->stopEvent) != MA_SUCCESS) { - ma_event_uninit(&pDevice->startEvent); - ma_event_uninit(&pDevice->wakeupEvent); - ma_mutex_uninit(&pDevice->lock); - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread stop event.", MA_FAILED_TO_CREATE_EVENT); - } +/************************************************************************************************************************************************************** - result = pContext->onDeviceInit(pContext, &config, pDevice); - if (result != MA_SUCCESS) { - return MA_NO_BACKEND; /* The error message will have been posted with ma_post_error() by the source of the error so don't bother calling it here. */ - } +Biquad Filter - ma_device__post_init_setup(pDevice, pConfig->deviceType); +**************************************************************************************************************************************************************/ +#ifndef MA_BIQUAD_FIXED_POINT_SHIFT +#define MA_BIQUAD_FIXED_POINT_SHIFT 14 +#endif +static ma_int32 ma_biquad_float_to_fp(double x) +{ + return (ma_int32)(x * (1 << MA_BIQUAD_FIXED_POINT_SHIFT)); +} - /* If the backend did not fill out a name for the device, try a generic method. */ - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - if (pDevice->capture.name[0] == '\0') { - if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_capture, config.capture.pDeviceID, pDevice->capture.name, sizeof(pDevice->capture.name)) != MA_SUCCESS) { - ma_strncpy_s(pDevice->capture.name, sizeof(pDevice->capture.name), (config.capture.pDeviceID == NULL) ? MA_DEFAULT_CAPTURE_DEVICE_NAME : "Capture Device", (size_t)-1); - } - } - } - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex || pDevice->type == ma_device_type_loopback) { - if (pDevice->playback.name[0] == '\0') { - if (ma_context__try_get_device_name_by_id(pContext, ma_device_type_playback, config.playback.pDeviceID, pDevice->playback.name, sizeof(pDevice->playback.name)) != MA_SUCCESS) { - ma_strncpy_s(pDevice->playback.name, sizeof(pDevice->playback.name), (config.playback.pDeviceID == NULL) ? MA_DEFAULT_PLAYBACK_DEVICE_NAME : "Playback Device", (size_t)-1); - } - } - } +ma_biquad_config ma_biquad_config_init(ma_format format, ma_uint32 channels, double b0, double b1, double b2, double a0, double a1, double a2) +{ + ma_biquad_config config; + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.b0 = b0; + config.b1 = b1; + config.b2 = b2; + config.a0 = a0; + config.a1 = a1; + config.a2 = a2; - /* Some backends don't require the worker thread. */ - if (!ma_context_is_backend_asynchronous(pContext)) { - /* The worker thread. 
*/ - if (ma_thread_create(pContext, &pDevice->thread, ma_worker_thread, pDevice) != MA_SUCCESS) { - ma_device_uninit(pDevice); - return ma_context_post_error(pContext, NULL, MA_LOG_LEVEL_ERROR, "Failed to create worker thread.", MA_FAILED_TO_CREATE_THREAD); - } + return config; +} - /* Wait for the worker thread to put the device into it's stopped state for real. */ - ma_event_wait(&pDevice->stopEvent); - } else { - ma_device__set_state(pDevice, MA_STATE_STOPPED); +ma_result ma_biquad_init(const ma_biquad_config* pConfig, ma_biquad* pBQ) +{ + if (pBQ == NULL) { + return MA_INVALID_ARGS; } + MA_ZERO_OBJECT(pBQ); -#ifdef MA_DEBUG_OUTPUT - printf("[%s]\n", ma_get_backend_name(pDevice->pContext->backend)); - if (pDevice->type == ma_device_type_capture || pDevice->type == ma_device_type_duplex) { - printf(" %s (%s)\n", pDevice->capture.name, "Capture"); - printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->capture.format), ma_get_format_name(pDevice->capture.internalFormat)); - printf(" Channels: %d -> %d\n", pDevice->capture.channels, pDevice->capture.internalChannels); - printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->capture.internalSampleRate); - printf(" Buffer Size: %d/%d (%d)\n", pDevice->capture.internalBufferSizeInFrames, pDevice->capture.internalPeriods, (pDevice->capture.internalBufferSizeInFrames / pDevice->capture.internalPeriods)); - printf(" Conversion:\n"); - printf(" Pre Format Conversion: %s\n", pDevice->capture.converter.isPreFormatConversionRequired ? "YES" : "NO"); - printf(" Post Format Conversion: %s\n", pDevice->capture.converter.isPostFormatConversionRequired ? "YES" : "NO"); - printf(" Channel Routing: %s\n", pDevice->capture.converter.isChannelRoutingRequired ? "YES" : "NO"); - printf(" SRC: %s\n", pDevice->capture.converter.isSRCRequired ? "YES" : "NO"); - printf(" Channel Routing at Start: %s\n", pDevice->capture.converter.isChannelRoutingAtStart ? "YES" : "NO"); - printf(" Passthrough: %s\n", pDevice->capture.converter.isPassthrough ? "YES" : "NO"); - } - if (pDevice->type == ma_device_type_playback || pDevice->type == ma_device_type_duplex) { - printf(" %s (%s)\n", pDevice->playback.name, "Playback"); - printf(" Format: %s -> %s\n", ma_get_format_name(pDevice->playback.format), ma_get_format_name(pDevice->playback.internalFormat)); - printf(" Channels: %d -> %d\n", pDevice->playback.channels, pDevice->playback.internalChannels); - printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->playback.internalSampleRate); - printf(" Buffer Size: %d/%d (%d)\n", pDevice->playback.internalBufferSizeInFrames, pDevice->playback.internalPeriods, (pDevice->playback.internalBufferSizeInFrames / pDevice->playback.internalPeriods)); - printf(" Conversion:\n"); - printf(" Pre Format Conversion: %s\n", pDevice->playback.converter.isPreFormatConversionRequired ? "YES" : "NO"); - printf(" Post Format Conversion: %s\n", pDevice->playback.converter.isPostFormatConversionRequired ? "YES" : "NO"); - printf(" Channel Routing: %s\n", pDevice->playback.converter.isChannelRoutingRequired ? "YES" : "NO"); - printf(" SRC: %s\n", pDevice->playback.converter.isSRCRequired ? "YES" : "NO"); - printf(" Channel Routing at Start: %s\n", pDevice->playback.converter.isChannelRoutingAtStart ? "YES" : "NO"); - printf(" Passthrough: %s\n", pDevice->playback.converter.isPassthrough ? 
"YES" : "NO"); + if (pConfig == NULL) { + return MA_INVALID_ARGS; } -#endif - - ma_assert(ma_device__get_state(pDevice) == MA_STATE_STOPPED); - return MA_SUCCESS; + return ma_biquad_reinit(pConfig, pBQ); } -ma_result ma_device_init_ex(const ma_backend backends[], ma_uint32 backendCount, const ma_context_config* pContextConfig, const ma_device_config* pConfig, ma_device* pDevice) +ma_result ma_biquad_reinit(const ma_biquad_config* pConfig, ma_biquad* pBQ) { - ma_result result; - ma_context* pContext; - ma_backend defaultBackends[ma_backend_null+1]; - ma_uint32 iBackend; - ma_backend* pBackendsToIterate; - ma_uint32 backendsToIterateCount; - - if (pConfig == NULL) { + if (pBQ == NULL || pConfig == NULL) { return MA_INVALID_ARGS; } - pContext = (ma_context*)ma_malloc(sizeof(*pContext)); - if (pContext == NULL) { - return MA_OUT_OF_MEMORY; + if (pConfig->a0 == 0) { + return MA_INVALID_ARGS; /* Division by zero. */ } - for (iBackend = 0; iBackend <= ma_backend_null; ++iBackend) { - defaultBackends[iBackend] = (ma_backend)iBackend; + /* Only supporting f32 and s16. */ + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; } - pBackendsToIterate = (ma_backend*)backends; - backendsToIterateCount = backendCount; - if (pBackendsToIterate == NULL) { - pBackendsToIterate = (ma_backend*)defaultBackends; - backendsToIterateCount = ma_countof(defaultBackends); + /* The format cannot be changed after initialization. */ + if (pBQ->format != ma_format_unknown && pBQ->format != pConfig->format) { + return MA_INVALID_OPERATION; } - result = MA_NO_BACKEND; - - for (iBackend = 0; iBackend < backendsToIterateCount; ++iBackend) { - result = ma_context_init(&pBackendsToIterate[iBackend], 1, pContextConfig, pContext); - if (result == MA_SUCCESS) { - result = ma_device_init(pContext, pConfig, pDevice); - if (result == MA_SUCCESS) { - break; /* Success. */ - } else { - ma_context_uninit(pContext); /* Failure. */ - } - } + /* The channel count cannot be changed after initialization. */ + if (pBQ->channels != 0 && pBQ->channels != pConfig->channels) { + return MA_INVALID_OPERATION; } - if (result != MA_SUCCESS) { - ma_free(pContext); - return result; + + pBQ->format = pConfig->format; + pBQ->channels = pConfig->channels; + + /* Normalize. 
*/ + if (pConfig->format == ma_format_f32) { + pBQ->b0.f32 = (float)(pConfig->b0 / pConfig->a0); + pBQ->b1.f32 = (float)(pConfig->b1 / pConfig->a0); + pBQ->b2.f32 = (float)(pConfig->b2 / pConfig->a0); + pBQ->a1.f32 = (float)(pConfig->a1 / pConfig->a0); + pBQ->a2.f32 = (float)(pConfig->a2 / pConfig->a0); + } else { + pBQ->b0.s32 = ma_biquad_float_to_fp(pConfig->b0 / pConfig->a0); + pBQ->b1.s32 = ma_biquad_float_to_fp(pConfig->b1 / pConfig->a0); + pBQ->b2.s32 = ma_biquad_float_to_fp(pConfig->b2 / pConfig->a0); + pBQ->a1.s32 = ma_biquad_float_to_fp(pConfig->a1 / pConfig->a0); + pBQ->a2.s32 = ma_biquad_float_to_fp(pConfig->a2 / pConfig->a0); } - pDevice->isOwnerOfContext = MA_TRUE; - return result; + return MA_SUCCESS; } -void ma_device_uninit(ma_device* pDevice) +static MA_INLINE void ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(ma_biquad* pBQ, float* pY, const float* pX) { - if (!ma_device__is_initialized(pDevice)) { - return; + ma_uint32 c; + const float b0 = pBQ->b0.f32; + const float b1 = pBQ->b1.f32; + const float b2 = pBQ->b2.f32; + const float a1 = pBQ->a1.f32; + const float a2 = pBQ->a2.f32; + + for (c = 0; c < pBQ->channels; c += 1) { + float r1 = pBQ->r1[c].f32; + float r2 = pBQ->r2[c].f32; + float x = pX[c]; + float y; + + y = b0*x + r1; + r1 = b1*x - a1*y + r2; + r2 = b2*x - a2*y; + + pY[c] = y; + pBQ->r1[c].f32 = r1; + pBQ->r2[c].f32 = r2; } +} - /* Make sure the device is stopped first. The backends will probably handle this naturally, but I like to do it explicitly for my own sanity. */ - if (ma_device_is_started(pDevice)) { - ma_device_stop(pDevice); +static MA_INLINE void ma_biquad_process_pcm_frame_f32(ma_biquad* pBQ, float* pY, const float* pX) +{ + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); +} + +static MA_INLINE void ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_uint32 c; + const ma_int32 b0 = pBQ->b0.s32; + const ma_int32 b1 = pBQ->b1.s32; + const ma_int32 b2 = pBQ->b2.s32; + const ma_int32 a1 = pBQ->a1.s32; + const ma_int32 a2 = pBQ->a2.s32; + + for (c = 0; c < pBQ->channels; c += 1) { + ma_int32 r1 = pBQ->r1[c].s32; + ma_int32 r2 = pBQ->r2[c].s32; + ma_int32 x = pX[c]; + ma_int32 y; + + y = (b0*x + r1) >> MA_BIQUAD_FIXED_POINT_SHIFT; + r1 = (b1*x - a1*y + r2); + r2 = (b2*x - a2*y); + + pY[c] = (ma_int16)ma_clamp(y, -32768, 32767); + pBQ->r1[c].s32 = r1; + pBQ->r2[c].s32 = r2; } +} - /* Putting the device into an uninitialized state will make the worker thread return. */ - ma_device__set_state(pDevice, MA_STATE_UNINITIALIZED); +static MA_INLINE void ma_biquad_process_pcm_frame_s16(ma_biquad* pBQ, ma_int16* pY, const ma_int16* pX) +{ + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); +} - /* Wake up the worker thread and wait for it to properly terminate. */ - if (!ma_context_is_backend_asynchronous(pDevice->pContext)) { - ma_event_signal(&pDevice->wakeupEvent); - ma_thread_wait(&pDevice->thread); +ma_result ma_biquad_process_pcm_frames(ma_biquad* pBQ, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + ma_uint32 n; + + if (pBQ == NULL || pFramesOut == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; } - pDevice->pContext->onDeviceUninit(pDevice); + /* Note that the logic below needs to support in-place filtering. That is, it must support the case where pFramesOut and pFramesIn are the same. 
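+       For example, a caller can filter a buffer in place with something like ma_biquad_process_pcm_frames(&bq, pFrames, pFrames, frameCount) (illustrative
+       names); this is safe because the per-frame helpers above read each input sample before writing the corresponding output sample.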
*/ - ma_event_uninit(&pDevice->stopEvent); - ma_event_uninit(&pDevice->startEvent); - ma_event_uninit(&pDevice->wakeupEvent); - ma_mutex_uninit(&pDevice->lock); + if (pBQ->format == ma_format_f32) { + /* */ float* pY = ( float*)pFramesOut; + const float* pX = (const float*)pFramesIn; - if (pDevice->isOwnerOfContext) { - ma_context_uninit(pDevice->pContext); - ma_free(pDevice->pContext); + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_f32__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else if (pBQ->format == ma_format_s16) { + /* */ ma_int16* pY = ( ma_int16*)pFramesOut; + const ma_int16* pX = (const ma_int16*)pFramesIn; + + for (n = 0; n < frameCount; n += 1) { + ma_biquad_process_pcm_frame_s16__direct_form_2_transposed(pBQ, pY, pX); + pY += pBQ->channels; + pX += pBQ->channels; + } + } else { + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; /* Format not supported. Should never hit this because it's checked in ma_biquad_init() and ma_biquad_reinit(). */ } - ma_zero_object(pDevice); + return MA_SUCCESS; } -void ma_device_set_stop_callback(ma_device* pDevice, ma_stop_proc proc) +ma_uint32 ma_biquad_get_latency(ma_biquad* pBQ) { - if (pDevice == NULL) { - return; + if (pBQ == NULL) { + return 0; } - ma_atomic_exchange_ptr(&pDevice->onStop, proc); + return 2; +} + + +/************************************************************************************************************************************************************** + +Low-Pass Filter + +**************************************************************************************************************************************************************/ +ma_lpf_config ma_lpf_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, double cutoffFrequency) +{ + ma_lpf_config config; + + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRate = sampleRate; + config.cutoffFrequency = cutoffFrequency; + + return config; +} + +static MA_INLINE ma_biquad_config ma_lpf__get_biquad_config(const ma_lpf_config* pConfig) +{ + ma_biquad_config bqConfig; + double q; + double w; + double s; + double c; + double a; + + MA_ASSERT(pConfig != NULL); + + q = 1 / sqrt(2); + w = 2 * MA_PI_D * pConfig->cutoffFrequency / pConfig->sampleRate; + s = sin(w); + c = cos(w); + a = s / (2*q); + + bqConfig.b0 = (1 - c) / 2; + bqConfig.b1 = 1 - c; + bqConfig.b2 = (1 - c) / 2; + bqConfig.a0 = 1 + a; + bqConfig.a1 = -2 * c; + bqConfig.a2 = 1 - a; + + bqConfig.format = pConfig->format; + bqConfig.channels = pConfig->channels; + + return bqConfig; } -ma_result ma_device_start(ma_device* pDevice) +ma_result ma_lpf_init(const ma_lpf_config* pConfig, ma_lpf* pLPF) { ma_result result; + ma_biquad_config bqConfig; - if (pDevice == NULL) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); + if (pLPF == NULL) { + return MA_INVALID_ARGS; } - if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_start() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED); - } + MA_ZERO_OBJECT(pLPF); - if (ma_device__get_state(pDevice) == MA_STATE_STARTED) { - return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_start() called when the device is already started.", MA_INVALID_OPERATION); /* Already started. 
Returning an error to let the application know because it probably means they're doing something wrong. */ + if (pConfig == NULL) { + return MA_INVALID_ARGS; } - result = MA_ERROR; - ma_mutex_lock(&pDevice->lock); - { - /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a stopped or paused state. */ - ma_assert(ma_device__get_state(pDevice) == MA_STATE_STOPPED); - - ma_device__set_state(pDevice, MA_STATE_STARTING); - - /* Asynchronous backends need to be handled differently. */ - if (ma_context_is_backend_asynchronous(pDevice->pContext)) { - result = pDevice->pContext->onDeviceStart(pDevice); - if (result == MA_SUCCESS) { - ma_device__set_state(pDevice, MA_STATE_STARTED); - } - } else { - /* - Synchronous backends are started by signaling an event that's being waited on in the worker thread. We first wake up the - thread and then wait for the start event. - */ - ma_event_signal(&pDevice->wakeupEvent); - - /* - Wait for the worker thread to finish starting the device. Note that the worker thread will be the one who puts the device - into the started state. Don't call ma_device__set_state() here. - */ - ma_event_wait(&pDevice->startEvent); - result = pDevice->workResult; - } + bqConfig = ma_lpf__get_biquad_config(pConfig); + result = ma_biquad_init(&bqConfig, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; } - ma_mutex_unlock(&pDevice->lock); - return result; + return MA_SUCCESS; } -ma_result ma_device_stop(ma_device* pDevice) +ma_result ma_lpf_reinit(const ma_lpf_config* pConfig, ma_lpf* pLPF) { ma_result result; + ma_biquad_config bqConfig; - if (pDevice == NULL) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called with invalid arguments (pDevice == NULL).", MA_INVALID_ARGS); - } - - if (ma_device__get_state(pDevice) == MA_STATE_UNINITIALIZED) { - return ma_post_error(pDevice, MA_LOG_LEVEL_ERROR, "ma_device_stop() called for an uninitialized device.", MA_DEVICE_NOT_INITIALIZED); + if (pLPF == NULL || pConfig == NULL) { + return MA_INVALID_ARGS; } - if (ma_device__get_state(pDevice) == MA_STATE_STOPPED) { - return ma_post_error(pDevice, MA_LOG_LEVEL_WARNING, "ma_device_stop() called when the device is already stopped.", MA_INVALID_OPERATION); /* Already stopped. Returning an error to let the application know because it probably means they're doing something wrong. */ + bqConfig = ma_lpf__get_biquad_config(pConfig); + result = ma_biquad_reinit(&bqConfig, &pLPF->bq); + if (result != MA_SUCCESS) { + return result; } - result = MA_ERROR; - ma_mutex_lock(&pDevice->lock); - { - /* Starting and stopping are wrapped in a mutex which means we can assert that the device is in a started or paused state. */ - ma_assert(ma_device__get_state(pDevice) == MA_STATE_STARTED); + return MA_SUCCESS; +} - ma_device__set_state(pDevice, MA_STATE_STOPPING); +static MA_INLINE void ma_lpf_process_pcm_frame_s16(ma_lpf* pLPF, ma_int16* pFrameOut, const ma_int16* pFrameIn) +{ + ma_biquad_process_pcm_frame_s16(&pLPF->bq, pFrameOut, pFrameIn); +} - /* There's no need to wake up the thread like we do when starting. 
*/ +static MA_INLINE void ma_lpf_process_pcm_frame_f32(ma_lpf* pLPF, float* pFrameOut, const float* pFrameIn) +{ + ma_biquad_process_pcm_frame_f32(&pLPF->bq, pFrameOut, pFrameIn); +} - if (pDevice->pContext->onDeviceStop) { - result = pDevice->pContext->onDeviceStop(pDevice); - } else { - result = MA_SUCCESS; - } +ma_result ma_lpf_process_pcm_frames(ma_lpf* pLPF, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) +{ + if (pLPF == NULL) { + return MA_INVALID_ARGS; + } - /* Asynchronous backends need to be handled differently. */ - if (ma_context_is_backend_asynchronous(pDevice->pContext)) { - ma_device__set_state(pDevice, MA_STATE_STOPPED); - } else { - /* Synchronous backends. */ + return ma_biquad_process_pcm_frames(&pLPF->bq, pFramesOut, pFramesIn, frameCount); +} - /* - We need to wait for the worker thread to become available for work before returning. Note that the worker thread will be - the one who puts the device into the stopped state. Don't call ma_device__set_state() here. - */ - ma_event_wait(&pDevice->stopEvent); - result = MA_SUCCESS; - } +ma_uint32 ma_lpf_get_latency(ma_lpf* pLPF) +{ + if (pLPF == NULL) { + return 0; } - ma_mutex_unlock(&pDevice->lock); - return result; + return ma_biquad_get_latency(&pLPF->bq); } -ma_bool32 ma_device_is_started(ma_device* pDevice) + +/************************************************************************************************************************************************************** + +Resampling + +**************************************************************************************************************************************************************/ +ma_linear_resampler_config ma_linear_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) { - if (pDevice == NULL) { - return MA_FALSE; - } + ma_linear_resampler_config config; + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.lpfCount = 1; + config.lpfNyquistFactor = 1; - return ma_device__get_state(pDevice) == MA_STATE_STARTED; + return config; } -ma_result ma_device_set_master_volume(ma_device* pDevice, float volume) +static ma_result ma_linear_resampler_set_rate_internal(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_bool32 isResamplerAlreadyInitialized) { - if (pDevice == NULL) { + ma_uint32 gcf; + + if (pResampler == NULL) { return MA_INVALID_ARGS; } - if (volume < 0.0f || volume > 1.0f) { + if (sampleRateIn == 0 || sampleRateOut == 0) { return MA_INVALID_ARGS; } - pDevice->masterVolumeFactor = volume; + /* Simplify the sample rate. 
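+       For example, a 44100 -> 48000 conversion reduces to 147 -> 160 because the greatest common factor is 300; working with the reduced ratio keeps
+       inTimeFrac and the advance counters small without changing the actual conversion ratio.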
*/
+    gcf = ma_gcf_u32(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut);
+    pResampler->config.sampleRateIn /= gcf;
+    pResampler->config.sampleRateOut /= gcf;
 
-    return MA_SUCCESS;
-}
+    if (pResampler->config.lpfCount > 0) {
+        ma_result result;
+        ma_uint32 iFilter;
+        ma_uint32 lpfSampleRate;
+        double lpfCutoffFrequency;
+        ma_lpf_config lpfConfig;
 
-ma_result ma_device_get_master_volume(ma_device* pDevice, float* pVolume)
-{
-    if (pDevice == NULL || pVolume == NULL) {
-        return MA_INVALID_ARGS;
+        if (pResampler->config.lpfCount > MA_MAX_RESAMPLER_LPF_FILTERS) {
+            return MA_INVALID_ARGS;
+        }
+
+        lpfSampleRate = (ma_uint32)(ma_max(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut));
+        lpfCutoffFrequency = ( double)(ma_min(pResampler->config.sampleRateIn, pResampler->config.sampleRateOut) * 0.5 * pResampler->config.lpfNyquistFactor);
+
+        lpfConfig = ma_lpf_config_init(pResampler->config.format, pResampler->config.channels, lpfSampleRate, lpfCutoffFrequency);
+
+        /*
+        If the resampler is already initialized we don't want to do a fresh initialization of the low-pass filter because it will result in the cached frames
+        getting cleared. Instead we re-initialize the filter which will maintain any cached frames.
+        */
+        result = MA_SUCCESS;
+        for (iFilter = 0; iFilter < pResampler->config.lpfCount; iFilter += 1) {
+            if (isResamplerAlreadyInitialized) {
+                result = ma_lpf_reinit(&lpfConfig, &pResampler->lpf[iFilter]);
+            } else {
+                result = ma_lpf_init(&lpfConfig, &pResampler->lpf[iFilter]);
+            }
+
+            if (result != MA_SUCCESS) {
+                break;
+            }
+        }
+
+        if (result != MA_SUCCESS) {
+            return result; /* Failed to initialize the low-pass filter. */
+        }
     }
 
-    *pVolume = pDevice->masterVolumeFactor;
+    pResampler->inAdvanceInt = pResampler->config.sampleRateIn / pResampler->config.sampleRateOut;
+    pResampler->inAdvanceFrac = pResampler->config.sampleRateIn % pResampler->config.sampleRateOut;
+
+    /* Make sure the fractional part is less than the output sample rate. */
+    pResampler->inTimeInt += pResampler->inTimeFrac / pResampler->config.sampleRateOut;
+    pResampler->inTimeFrac = pResampler->inTimeFrac % pResampler->config.sampleRateOut;
 
     return MA_SUCCESS;
 }
 
-ma_result ma_device_set_master_gain_db(ma_device* pDevice, float gainDB)
+ma_result ma_linear_resampler_init(const ma_linear_resampler_config* pConfig, ma_linear_resampler* pResampler)
 {
-    if (gainDB > 0) {
+    ma_result result;
+
+    if (pResampler == NULL) {
         return MA_INVALID_ARGS;
     }
-    return ma_device_set_master_volume(pDevice, ma_gain_db_to_factor(gainDB));
-}
-
-ma_result ma_device_get_master_gain_db(ma_device* pDevice, float* pGainDB)
-{
-    float factor;
-    ma_result result;
+    MA_ZERO_OBJECT(pResampler);
 
-    if (pGainDB == NULL) {
+    if (pConfig == NULL) {
         return MA_INVALID_ARGS;
     }
 
-    result = ma_device_get_master_volume(pDevice, &factor);
+    pResampler->config = *pConfig;
+
+    /* Setting the rate will set up the filter and time advances for us. */
+    result = ma_linear_resampler_set_rate_internal(pResampler, pConfig->sampleRateIn, pConfig->sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_FALSE);
     if (result != MA_SUCCESS) {
         return result;
     }
-    *pGainDB = ma_factor_to_gain_db(factor);
+    pResampler->inTimeInt = 1; /* Set this to one to force an input sample to always be loaded for the first output frame. 
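+                                   The load loops in the process functions below consume one input frame for each unit of inTimeInt, so this guarantees that
+                                   at least one real input frame has been read into the x0/x1 history before the first interpolation happens.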
*/
+    pResampler->inTimeFrac = 0;
 
     return MA_SUCCESS;
 }
 
+void ma_linear_resampler_uninit(ma_linear_resampler* pResampler)
+{
+    if (pResampler == NULL) {
+        return;
+    }
+}
 
-ma_context_config ma_context_config_init()
+static MA_INLINE ma_int16 ma_linear_resampler_mix_s16(ma_int16 x, ma_int16 y, ma_int32 a, const ma_int32 shift)
 {
-    ma_context_config config;
-    ma_zero_object(&config);
+    ma_int32 b;
+    ma_int32 c;
+    ma_int32 r;
 
-    return config;
+    MA_ASSERT(a <= (1<<shift));
+
+    b = x * ((1<<shift) - a);
+    c = y * a;
+
+    r = b + c;
+
+    return (ma_int16)(r >> shift);
 }
 
-ma_device_config ma_device_config_init(ma_device_type deviceType)
+static void ma_linear_resampler_interpolate_frame_s16(ma_linear_resampler* pResampler, ma_int16* pFrameOut)
 {
-    ma_device_config config;
-    ma_zero_object(&config);
-    config.deviceType = deviceType;
+    ma_uint32 c;
+    ma_uint32 a;
+    const ma_uint32 shift = 12;
 
-    return config;
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut != NULL);
+
+    a = (pResampler->inTimeFrac << shift) / pResampler->config.sampleRateOut;
+
+    for (c = 0; c < pResampler->config.channels; c += 1) {
+        ma_int16 s = ma_linear_resampler_mix_s16(pResampler->x0.s16[c], pResampler->x1.s16[c], a, shift);
+        pFrameOut[c] = s;
+    }
 }
-#endif  /* MA_NO_DEVICE_IO */
 
-void ma_get_standard_channel_map_microsoft(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS])
+static void ma_linear_resampler_interpolate_frame_f32(ma_linear_resampler* pResampler, float* pFrameOut)
 {
-    /* Based off the speaker configurations mentioned here: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */
-    switch (channels)
-    {
-        case 1:
-        {
-            channelMap[0] = MA_CHANNEL_MONO;
-        } break;
+    ma_uint32 c;
+    float a;
 
-        case 2:
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-        } break;
+    MA_ASSERT(pResampler != NULL);
+    MA_ASSERT(pFrameOut != NULL);
 
-        case 3: /* Not defined, but best guess. */
-        {
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-        } break;
+    a = (float)pResampler->inTimeFrac / pResampler->config.sampleRateOut;
 
-        case 4:
-        {
-#ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP
-            /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_FRONT_CENTER;
-            channelMap[3] = MA_CHANNEL_BACK_CENTER;
-#else
-            /* Quad. */
-            channelMap[0] = MA_CHANNEL_FRONT_LEFT;
-            channelMap[1] = MA_CHANNEL_FRONT_RIGHT;
-            channelMap[2] = MA_CHANNEL_BACK_LEFT;
-            channelMap[3] = MA_CHANNEL_BACK_RIGHT;
-#endif
-        } break;
+    for (c = 0; c < pResampler->config.channels; c += 1) {
+        float s = ma_mix_f32_fast(pResampler->x0.f32[c], pResampler->x1.f32[c], a);
+        pFrameOut[c] = s;
+    }
+}
 
-        case 5: /* Not defined, but best guess. 
*/ - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_BACK_LEFT; - channelMap[4] = MA_CHANNEL_BACK_RIGHT; - } break; +static ma_result ma_linear_resampler_process_pcm_frames_s16_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const ma_int16* pFramesInS16; + /* */ ma_int16* pFramesOutS16; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; - case 6: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_LFE; - channelMap[4] = MA_CHANNEL_SIDE_LEFT; - channelMap[5] = MA_CHANNEL_SIDE_RIGHT; - } break; + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); - case 7: /* Not defined, but best guess. */ - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_LFE; - channelMap[4] = MA_CHANNEL_BACK_CENTER; - channelMap[5] = MA_CHANNEL_SIDE_LEFT; - channelMap[6] = MA_CHANNEL_SIDE_RIGHT; - } break; + pFramesInS16 = (const ma_int16*)pFramesIn; + pFramesOutS16 = ( ma_int16*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; - case 8: - default: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_LFE; - channelMap[4] = MA_CHANNEL_BACK_LEFT; - channelMap[5] = MA_CHANNEL_BACK_RIGHT; - channelMap[6] = MA_CHANNEL_SIDE_LEFT; - channelMap[7] = MA_CHANNEL_SIDE_RIGHT; - } break; - } + for (;;) { + if (framesProcessedOut >= frameCountOut) { + break; + } + + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. */ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iFilter; + ma_uint32 iChannel; + + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } - /* Remainder. */ - if (channels > 8) { - ma_uint32 iChannel; - for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + /* Filter. */ + for (iFilter = 0; iFilter < pResampler->config.lpfCount; iFilter += 1) { + ma_lpf_process_pcm_frame_s16(&pResampler->lpf[iFilter], pResampler->x1.s16, pResampler->x1.s16); + } + + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. 
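+           At this point inTimeInt is zero, meaning x0 and x1 hold the two filtered input frames that straddle the current output position, so the
+           interpolation only needs the fractional part of the timer.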
*/ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; } } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; } -void ma_get_standard_channel_map_alsa(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +static ma_result ma_linear_resampler_process_pcm_frames_s16_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - switch (channels) - { - case 1: - { - channelMap[0] = MA_CHANNEL_MONO; - } break; + const ma_int16* pFramesInS16; + /* */ ma_int16* pFramesOutS16; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; - case 2: - { - channelMap[0] = MA_CHANNEL_LEFT; - channelMap[1] = MA_CHANNEL_RIGHT; - } break; + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); - case 3: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - } break; + pFramesInS16 = (const ma_int16*)pFramesIn; + pFramesOutS16 = ( ma_int16*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; - case 4: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - } break; + for (;;) { + ma_uint32 iFilter; - case 5: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - } break; + if (framesProcessedOut >= frameCountOut) { + break; + } - case 6: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_LFE; - } break; + /* Before interpolating we need to load the buffers. 
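+           Unlike the downsampling path above, no filtering happens while loading; when upsampling, the low-pass filter is applied to the interpolated
+           output frames further below, once the signal is at the higher rate.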
*/ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iChannel; - case 7: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_LFE; - channelMap[6] = MA_CHANNEL_BACK_CENTER; - } break; + if (pFramesInS16 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = pFramesInS16[iChannel]; + } + pFramesInS16 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.s16[iChannel] = pResampler->x1.s16[iChannel]; + pResampler->x1.s16[iChannel] = 0; + } + } - case 8: - default: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_LFE; - channelMap[6] = MA_CHANNEL_SIDE_LEFT; - channelMap[7] = MA_CHANNEL_SIDE_RIGHT; - } break; - } + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } - /* Remainder. */ - if (channels > 8) { - ma_uint32 iChannel; - for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. */ + if (pFramesOutS16 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_s16(pResampler, pFramesOutS16); + + /* Filter. */ + for (iFilter = 0; iFilter < pResampler->config.lpfCount; iFilter += 1) { + ma_lpf_process_pcm_frame_s16(&pResampler->lpf[iFilter], pFramesOutS16, pFramesOutS16); + } + + pFramesOutS16 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. 
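+           For example, with the reduced rates 147 -> 160 (44100 -> 48000), inAdvanceInt is 0 and inAdvanceFrac is 147, so inTimeFrac grows by 147 per
+           output frame and one extra input frame is consumed each time it wraps past 160.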
*/ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; } } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; } -void ma_get_standard_channel_map_rfc3551(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +static ma_result ma_linear_resampler_process_pcm_frames_s16(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - switch (channels) - { - case 1: - { - channelMap[0] = MA_CHANNEL_MONO; - } break; + MA_ASSERT(pResampler != NULL); - case 2: - { - channelMap[0] = MA_CHANNEL_LEFT; - channelMap[1] = MA_CHANNEL_RIGHT; - } break; + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_s16_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_s16_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} - case 3: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - } break; - case 4: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_CENTER; - channelMap[2] = MA_CHANNEL_FRONT_RIGHT; - channelMap[3] = MA_CHANNEL_BACK_CENTER; - } break; +static ma_result ma_linear_resampler_process_pcm_frames_f32_downsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; - case 5: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_BACK_LEFT; - channelMap[4] = MA_CHANNEL_BACK_RIGHT; - } break; + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); - case 6: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_SIDE_LEFT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_FRONT_RIGHT; - channelMap[4] = MA_CHANNEL_SIDE_RIGHT; - channelMap[5] = MA_CHANNEL_BACK_CENTER; - } break; - } + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; - /* Remainder. */ - if (channels > 8) { - ma_uint32 iChannel; - for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6)); + for (;;) { + if (framesProcessedOut >= frameCountOut) { + break; + } + + /* Before interpolating we need to load the buffers. When doing this we need to ensure we run every input sample through the filter. 
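+           The low-pass filter is recursive, so input frames that never line up with an output frame still have to pass through it to keep its internal
+           state correct; this is what gives the anti-aliasing behaviour when reducing the rate.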
*/ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iFilter; + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + /* Filter. */ + for (iFilter = 0; iFilter < pResampler->config.lpfCount; iFilter += 1) { + ma_lpf_process_pcm_frame_f32(&pResampler->lpf[iFilter], pResampler->x1.f32, pResampler->x1.f32); + } + + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and filtered and we can generate the next output frame. */ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; } } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; } -void ma_get_standard_channel_map_flac(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +static ma_result ma_linear_resampler_process_pcm_frames_f32_upsample(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - switch (channels) - { - case 1: - { - channelMap[0] = MA_CHANNEL_MONO; - } break; + const float* pFramesInF32; + /* */ float* pFramesOutF32; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; - case 2: - { - channelMap[0] = MA_CHANNEL_LEFT; - channelMap[1] = MA_CHANNEL_RIGHT; - } break; + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFrameCountIn != NULL); + MA_ASSERT(pFrameCountOut != NULL); - case 3: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - } break; + pFramesInF32 = (const float*)pFramesIn; + pFramesOutF32 = ( float*)pFramesOut; + frameCountIn = *pFrameCountIn; + frameCountOut = *pFrameCountOut; + framesProcessedIn = 0; + framesProcessedOut = 0; - case 4: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - } break; + for (;;) { + ma_uint32 iFilter; - case 5: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_BACK_LEFT; - channelMap[4] = MA_CHANNEL_BACK_RIGHT; - } break; + if (framesProcessedOut >= frameCountOut) { + break; + } - case 6: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_LFE; - channelMap[4] = MA_CHANNEL_BACK_LEFT; - channelMap[5] = 
MA_CHANNEL_BACK_RIGHT; - } break; + /* Before interpolating we need to load the buffers. */ + while (pResampler->inTimeInt > 0 && frameCountIn > 0) { + ma_uint32 iChannel; + + if (pFramesInF32 != NULL) { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = pFramesInF32[iChannel]; + } + pFramesInF32 += pResampler->config.channels; + } else { + for (iChannel = 0; iChannel < pResampler->config.channels; iChannel += 1) { + pResampler->x0.f32[iChannel] = pResampler->x1.f32[iChannel]; + pResampler->x1.f32[iChannel] = 0; + } + } + + frameCountIn -= 1; + framesProcessedIn += 1; + pResampler->inTimeInt -= 1; + } + + if (pResampler->inTimeInt > 0) { + break; /* Ran out of input data. */ + } + + /* Getting here means the frames have been loaded and we can generate the next output frame. */ + if (pFramesOutF32 != NULL) { + MA_ASSERT(pResampler->inTimeInt == 0); + ma_linear_resampler_interpolate_frame_f32(pResampler, pFramesOutF32); + + /* Filter. */ + for (iFilter = 0; iFilter < pResampler->config.lpfCount; iFilter += 1) { + ma_lpf_process_pcm_frame_f32(&pResampler->lpf[iFilter], pFramesOutF32, pFramesOutF32); + } + + pFramesOutF32 += pResampler->config.channels; + } + + framesProcessedOut += 1; + + /* Advance time forward. */ + pResampler->inTimeInt += pResampler->inAdvanceInt; + pResampler->inTimeFrac += pResampler->inAdvanceFrac; + if (pResampler->inTimeFrac >= pResampler->config.sampleRateOut) { + pResampler->inTimeFrac -= pResampler->config.sampleRateOut; + pResampler->inTimeInt += 1; + } + } + + *pFrameCountIn = framesProcessedIn; + *pFrameCountOut = framesProcessedOut; + + return MA_SUCCESS; +} + +static ma_result ma_linear_resampler_process_pcm_frames_f32(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + + if (pResampler->config.sampleRateIn > pResampler->config.sampleRateOut) { + return ma_linear_resampler_process_pcm_frames_f32_downsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + return ma_linear_resampler_process_pcm_frames_f32_upsample(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } +} - case 7: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_LFE; - channelMap[4] = MA_CHANNEL_BACK_CENTER; - channelMap[5] = MA_CHANNEL_SIDE_LEFT; - channelMap[6] = MA_CHANNEL_SIDE_RIGHT; - } break; - case 8: - default: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - channelMap[3] = MA_CHANNEL_LFE; - channelMap[4] = MA_CHANNEL_BACK_LEFT; - channelMap[5] = MA_CHANNEL_BACK_RIGHT; - channelMap[6] = MA_CHANNEL_SIDE_LEFT; - channelMap[7] = MA_CHANNEL_SIDE_RIGHT; - } break; +ma_result ma_linear_resampler_process_pcm_frames(ma_linear_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pResampler == NULL) { + return MA_INVALID_ARGS; } - /* Remainder. 
*/ - if (channels > 8) { - ma_uint32 iChannel; - for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); - } + /* */ if (pResampler->config.format == ma_format_s16) { + return ma_linear_resampler_process_pcm_frames_s16(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else if (pResampler->config.format == ma_format_f32) { + return ma_linear_resampler_process_pcm_frames_f32(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Should never get here. Getting here means the format is not supported and you didn't check the return value of ma_linear_resampler_init(). */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; } } -void ma_get_standard_channel_map_vorbis(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) + +ma_result ma_linear_resampler_set_rate(ma_linear_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) { - /* In Vorbis' type 0 channel mapping, the first two channels are not always the standard left/right - it will have the center speaker where the right usually goes. Why?! */ - switch (channels) - { - case 1: - { - channelMap[0] = MA_CHANNEL_MONO; - } break; + return ma_linear_resampler_set_rate_internal(pResampler, sampleRateIn, sampleRateOut, /* isResamplerAlreadyInitialized = */ MA_TRUE); +} - case 2: - { - channelMap[0] = MA_CHANNEL_LEFT; - channelMap[1] = MA_CHANNEL_RIGHT; - } break; +ma_result ma_linear_resampler_set_rate_ratio(ma_linear_resampler* pResampler, float ratioInOut) +{ + ma_uint32 n; + ma_uint32 d; - case 3: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_CENTER; - channelMap[2] = MA_CHANNEL_FRONT_RIGHT; - } break; + d = 1000000; /* We use up to 6 decimal places. */ + n = (ma_uint32)(ratioInOut * d); - case 4: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - } break; + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. */ + } - case 5: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_CENTER; - channelMap[2] = MA_CHANNEL_FRONT_RIGHT; - channelMap[3] = MA_CHANNEL_BACK_LEFT; - channelMap[4] = MA_CHANNEL_BACK_RIGHT; - } break; + MA_ASSERT(n != 0); + + return ma_linear_resampler_set_rate(pResampler, n, d); +} - case 6: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_CENTER; - channelMap[2] = MA_CHANNEL_FRONT_RIGHT; - channelMap[3] = MA_CHANNEL_BACK_LEFT; - channelMap[4] = MA_CHANNEL_BACK_RIGHT; - channelMap[5] = MA_CHANNEL_LFE; - } break; - case 7: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_CENTER; - channelMap[2] = MA_CHANNEL_FRONT_RIGHT; - channelMap[3] = MA_CHANNEL_SIDE_LEFT; - channelMap[4] = MA_CHANNEL_SIDE_RIGHT; - channelMap[5] = MA_CHANNEL_BACK_CENTER; - channelMap[6] = MA_CHANNEL_LFE; - } break; +ma_uint64 ma_linear_resampler_get_required_input_frame_count(ma_linear_resampler* pResampler, ma_uint64 outputFrameCount) +{ + ma_uint64 count; - case 8: - default: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_CENTER; - channelMap[2] = MA_CHANNEL_FRONT_RIGHT; - channelMap[3] = MA_CHANNEL_SIDE_LEFT; - channelMap[4] = MA_CHANNEL_SIDE_RIGHT; - channelMap[5] = MA_CHANNEL_BACK_LEFT; - channelMap[6] = MA_CHANNEL_BACK_RIGHT; - channelMap[7] = MA_CHANNEL_LFE; - } break; + if (pResampler == NULL) { + return 0; } - /* Remainder. 
*/ - if (channels > 8) { - ma_uint32 iChannel; - for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); - } + if (outputFrameCount == 0) { + return 0; } + + /* Any whole input frames are consumed before the first output frame is generated. */ + count = pResampler->inTimeInt; + outputFrameCount -= 1; + + /* The rest of the output frames can be calculated in constant time. */ + count += outputFrameCount * pResampler->inAdvanceInt; + count += (pResampler->inTimeFrac + (outputFrameCount * pResampler->inAdvanceFrac)) / pResampler->config.sampleRateOut; + + return count; } -void ma_get_standard_channel_map_sound4(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +ma_uint64 ma_linear_resampler_get_expected_output_frame_count(ma_linear_resampler* pResampler, ma_uint64 inputFrameCount) { - switch (channels) - { - case 1: - { - channelMap[0] = MA_CHANNEL_MONO; - } break; + ma_uint64 outputFrameCount; + ma_uint64 inTimeInt; + ma_uint64 inTimeFrac; - case 2: - { - channelMap[0] = MA_CHANNEL_LEFT; - channelMap[1] = MA_CHANNEL_RIGHT; - } break; + if (pResampler == NULL) { + return 0; + } - case 3: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_CENTER; - } break; + /* TODO: Try making this run in constant time. */ - case 4: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - } break; + outputFrameCount = 0; + inTimeInt = pResampler->inTimeInt; + inTimeFrac = pResampler->inTimeFrac; - case 5: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - } break; + for (;;) { + while (inTimeInt > 0 && inputFrameCount > 0) { + inputFrameCount -= 1; + inTimeInt -= 1; + } - case 6: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_LFE; - } break; + if (inTimeInt > 0) { + break; + } - case 7: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_BACK_CENTER; - channelMap[6] = MA_CHANNEL_LFE; - } break; + outputFrameCount += 1; - case 8: - default: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_LFE; - channelMap[6] = MA_CHANNEL_SIDE_LEFT; - channelMap[7] = MA_CHANNEL_SIDE_RIGHT; - } break; + /* Advance time forward. */ + inTimeInt += pResampler->inAdvanceInt; + inTimeFrac += pResampler->inAdvanceFrac; + if (inTimeFrac >= pResampler->config.sampleRateOut) { + inTimeFrac -= pResampler->config.sampleRateOut; + inTimeInt += 1; + } } - /* Remainder. 
*/ - if (channels > 8) { - ma_uint32 iChannel; - for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); - } + return outputFrameCount; +} + +ma_uint64 ma_linear_resampler_get_input_latency(ma_linear_resampler* pResampler) +{ + ma_uint32 latency; + ma_uint32 iFilter; + + if (pResampler == NULL) { + return 0; + } + + latency = 1; + for (iFilter = 0; iFilter < pResampler->config.lpfCount; iFilter += 1) { + latency += ma_lpf_get_latency(&pResampler->lpf[iFilter]); } + + return latency; } -void ma_get_standard_channel_map_sndio(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +ma_uint64 ma_linear_resampler_get_output_latency(ma_linear_resampler* pResampler) { - switch (channels) + if (pResampler == NULL) { + return 0; + } + + return ma_linear_resampler_get_input_latency(pResampler) * pResampler->config.sampleRateOut / pResampler->config.sampleRateIn; +} + + +#if defined(ma_speex_resampler_h) +#define MA_HAS_SPEEX_RESAMPLER + +static ma_result ma_result_from_speex_err(int err) +{ + switch (err) { - case 1: - { - channelMap[0] = MA_CHANNEL_MONO; - } break; + case RESAMPLER_ERR_SUCCESS: return MA_SUCCESS; + case RESAMPLER_ERR_ALLOC_FAILED: return MA_OUT_OF_MEMORY; + case RESAMPLER_ERR_BAD_STATE: return MA_ERROR; + case RESAMPLER_ERR_INVALID_ARG: return MA_INVALID_ARGS; + case RESAMPLER_ERR_PTR_OVERLAP: return MA_INVALID_ARGS; + case RESAMPLER_ERR_OVERFLOW: return MA_ERROR; + default: return MA_ERROR; + } +} +#endif /* ma_speex_resampler_h */ - case 2: - { - channelMap[0] = MA_CHANNEL_LEFT; - channelMap[1] = MA_CHANNEL_RIGHT; - } break; +ma_resampler_config ma_resampler_config_init(ma_format format, ma_uint32 channels, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_resample_algorithm algorithm) +{ + ma_resampler_config config; - case 3: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_FRONT_CENTER; - } break; + MA_ZERO_OBJECT(&config); + config.format = format; + config.channels = channels; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + config.algorithm = algorithm; - case 4: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - } break; + /* Linear. */ + config.linear.lpfCount = 1; + config.linear.lpfNyquistFactor = 1; - case 5: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - } break; + /* Speex. */ + config.speex.quality = 3; /* Cannot leave this as 0 as that is actually a valid value for Speex resampling quality. */ + + return config; +} + +ma_result ma_resampler_init(const ma_resampler_config* pConfig, ma_resampler* pResampler) +{ + ma_result result; + + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } - case 6: - default: - { - channelMap[0] = MA_CHANNEL_FRONT_LEFT; - channelMap[1] = MA_CHANNEL_FRONT_RIGHT; - channelMap[2] = MA_CHANNEL_BACK_LEFT; - channelMap[3] = MA_CHANNEL_BACK_RIGHT; - channelMap[4] = MA_CHANNEL_FRONT_CENTER; - channelMap[5] = MA_CHANNEL_LFE; - } break; + MA_ZERO_OBJECT(pResampler); + + if (pConfig == NULL) { + return MA_INVALID_ARGS; } - /* Remainder. 
*/ - if (channels > 6) { - ma_uint32 iChannel; - for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) { - channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6)); - } + if (pConfig->format != ma_format_f32 && pConfig->format != ma_format_s16) { + return MA_INVALID_ARGS; } -} -void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) -{ - switch (standardChannelMap) + pResampler->config = *pConfig; + + switch (pConfig->algorithm) { - case ma_standard_channel_map_alsa: + case ma_resample_algorithm_linear: { - ma_get_standard_channel_map_alsa(channels, channelMap); - } break; + ma_linear_resampler_config linearConfig; + linearConfig = ma_linear_resampler_config_init(pConfig->format, pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut); + linearConfig.lpfCount = pConfig->linear.lpfCount; + linearConfig.lpfNyquistFactor = pConfig->linear.lpfNyquistFactor; - case ma_standard_channel_map_rfc3551: - { - ma_get_standard_channel_map_rfc3551(channels, channelMap); + result = ma_linear_resampler_init(&linearConfig, &pResampler->state.linear); + if (result != MA_SUCCESS) { + return result; + } } break; - case ma_standard_channel_map_flac: + case ma_resample_algorithm_speex: { - ma_get_standard_channel_map_flac(channels, channelMap); + #if defined(MA_HAS_SPEEX_RESAMPLER) + int speexErr; + pResampler->state.speex.pSpeexResamplerState = speex_resampler_init(pConfig->channels, pConfig->sampleRateIn, pConfig->sampleRateOut, pConfig->speex.quality, &speexErr); + if (pResampler->state.speex.pSpeexResamplerState == NULL) { + return ma_result_from_speex_err(speexErr); + } + #else + /* Speex resampler not available. */ + return MA_NO_BACKEND; + #endif } break; - case ma_standard_channel_map_vorbis: - { - ma_get_standard_channel_map_vorbis(channels, channelMap); - } break; + default: return MA_INVALID_ARGS; + } - case ma_standard_channel_map_sound4: - { - ma_get_standard_channel_map_sound4(channels, channelMap); - } break; - - case ma_standard_channel_map_sndio: - { - ma_get_standard_channel_map_sndio(channels, channelMap); - } break; + return MA_SUCCESS; +} - case ma_standard_channel_map_microsoft: - default: - { - ma_get_standard_channel_map_microsoft(channels, channelMap); - } break; +void ma_resampler_uninit(ma_resampler* pResampler) +{ + if (pResampler == NULL) { + return; + } + + if (pResampler->config.algorithm == ma_resample_algorithm_linear) { + ma_linear_resampler_uninit(&pResampler->state.linear); } + +#if defined(MA_HAS_SPEEX_RESAMPLER) + if (pResampler->config.algorithm == ma_resample_algorithm_speex) { + speex_resampler_destroy((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState); + } +#endif } -void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels) +static ma_result ma_resampler_process_pcm_frames__read__linear(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - if (pOut != NULL && pIn != NULL && channels > 0) { - ma_copy_memory(pOut, pIn, sizeof(*pOut) * channels); - } + return ma_linear_resampler_process_pcm_frames(&pResampler->state.linear, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); } -ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]) +#if defined(MA_HAS_SPEEX_RESAMPLER) +static ma_result ma_resampler_process_pcm_frames__read__speex(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* 
pFramesOut, ma_uint64* pFrameCountOut) { - if (channelMap == NULL) { - return MA_FALSE; - } + int speexErr; + ma_uint64 frameCountOut; + ma_uint64 frameCountIn; + ma_uint64 framesProcessedOut; + ma_uint64 framesProcessedIn; + unsigned int framesPerIteration = UINT_MAX; - /* A channel count of 0 is invalid. */ - if (channels == 0) { - return MA_FALSE; - } + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFrameCountOut != NULL); + MA_ASSERT(pFrameCountIn != NULL); - /* It does not make sense to have a mono channel when there is more than 1 channel. */ - if (channels > 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; ++iChannel) { - if (channelMap[iChannel] == MA_CHANNEL_MONO) { - return MA_FALSE; - } + /* + Reading from the Speex resampler requires a bit of dancing around for a few reasons. The first thing is that it's frame counts + are in unsigned int's whereas ours is in ma_uint64. We therefore need to run the conversion in a loop. The other, more complicated + problem, is that we need to keep track of the input time, similar to what we do with the linear resampler. The reason we need to + do this is for ma_resampler_get_required_input_frame_count() and ma_resampler_get_expected_output_frame_count(). + */ + frameCountOut = *pFrameCountOut; + frameCountIn = *pFrameCountIn; + framesProcessedOut = 0; + framesProcessedIn = 0; + + while (framesProcessedOut < frameCountOut && framesProcessedIn < frameCountIn) { + unsigned int frameCountInThisIteration; + unsigned int frameCountOutThisIteration; + const void* pFramesInThisIteration; + void* pFramesOutThisIteration; + + frameCountInThisIteration = framesPerIteration; + if ((ma_uint64)frameCountInThisIteration > (frameCountIn - framesProcessedIn)) { + frameCountInThisIteration = (unsigned int)(frameCountIn - framesProcessedIn); + } + + frameCountOutThisIteration = framesPerIteration; + if ((ma_uint64)frameCountOutThisIteration > (frameCountOut - framesProcessedOut)) { + frameCountOutThisIteration = (unsigned int)(frameCountOut - framesProcessedOut); + } + + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels)); + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels)); + + if (pResampler->config.format == ma_format_f32) { + speexErr = speex_resampler_process_interleaved_float((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, (const float*)pFramesInThisIteration, &frameCountInThisIteration, (float*)pFramesOutThisIteration, &frameCountOutThisIteration); + } else if (pResampler->config.format == ma_format_s16) { + speexErr = speex_resampler_process_interleaved_int((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, (const spx_int16_t*)pFramesInThisIteration, &frameCountInThisIteration, (spx_int16_t*)pFramesOutThisIteration, &frameCountOutThisIteration); + } else { + /* Format not supported. Should never get here. 
*/ + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; } + + if (speexErr != RESAMPLER_ERR_SUCCESS) { + return ma_result_from_speex_err(speexErr); + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; } - return MA_TRUE; + *pFrameCountOut = framesProcessedOut; + *pFrameCountIn = framesProcessedIn; + + return MA_SUCCESS; } +#endif -ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]) +static ma_result ma_resampler_process_pcm_frames__read(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - ma_uint32 iChannel; + MA_ASSERT(pResampler != NULL); + MA_ASSERT(pFramesOut != NULL); - if (channelMapA == channelMapB) { - return MA_FALSE; + /* pFramesOut is not NULL, which means we must have a capacity. */ + if (pFrameCountOut == NULL) { + return MA_INVALID_ARGS; } - if (channels == 0 || channels > MA_MAX_CHANNELS) { - return MA_FALSE; + /* It doesn't make sense to not have any input frames to process. */ + if (pFrameCountIn == NULL || pFramesIn == NULL) { + return MA_INVALID_ARGS; } - for (iChannel = 0; iChannel < channels; ++iChannel) { - if (channelMapA[iChannel] != channelMapB[iChannel]) { - return MA_FALSE; + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_resampler_process_pcm_frames__read__linear(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return ma_resampler_process_pcm_frames__read__speex(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + #else + break; + #endif } + + default: break; } - return MA_TRUE; + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; } -ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]) -{ - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; ++iChannel) { - if (channelMap[iChannel] != MA_CHANNEL_NONE) { - return MA_FALSE; - } - } +static ma_result ma_resampler_process_pcm_frames__seek__linear(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); - return MA_TRUE; + /* Seeking is supported natively by the linear resampler. */ + return ma_linear_resampler_process_pcm_frames(&pResampler->state.linear, pFramesIn, pFrameCountIn, NULL, pFrameCountOut); } -ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition) +#if defined(MA_HAS_SPEEX_RESAMPLER) +static ma_result ma_resampler_process_pcm_frames__seek__speex(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; ++iChannel) { - if (channelMap[iChannel] == channelPosition) { - return MA_TRUE; + /* The generic seek method is implemented in on top of ma_resampler_process_pcm_frames__read() by just processing into a dummy buffer. 
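+    devnull below holds 8192 floats (32768 bytes), so each pass of the loops below processes at most sizeof(devnull) / bpf output frames at a
+    time - for example, 32768 / 8 = 4096 frames for an f32 stereo stream (bpf = 8 bytes).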
*/ + float devnull[8192]; + ma_uint64 totalOutputFramesToProcess; + ma_uint64 totalOutputFramesProcessed; + ma_uint64 totalInputFramesProcessed; + ma_uint32 bpf; + ma_result result; + + MA_ASSERT(pResampler != NULL); + + totalOutputFramesProcessed = 0; + totalInputFramesProcessed = 0; + bpf = ma_get_bytes_per_frame(pResampler->config.format, pResampler->config.channels); + + if (pFrameCountOut != NULL) { + /* Seek by output frames. */ + totalOutputFramesToProcess = *pFrameCountOut; + } else { + /* Seek by input frames. */ + MA_ASSERT(pFrameCountIn != NULL); + totalOutputFramesToProcess = ma_resampler_get_expected_output_frame_count(pResampler, *pFrameCountIn); + } + + if (pFramesIn != NULL) { + /* Process input data. */ + MA_ASSERT(pFrameCountIn != NULL); + while (totalOutputFramesProcessed < totalOutputFramesToProcess && totalInputFramesProcessed < *pFrameCountIn) { + ma_uint64 inputFramesToProcessThisIteration = (*pFrameCountIn - totalInputFramesProcessed); + ma_uint64 outputFramesToProcessThisIteration = (totalOutputFramesToProcess - totalOutputFramesProcessed); + if (outputFramesToProcessThisIteration > sizeof(devnull) / bpf) { + outputFramesToProcessThisIteration = sizeof(devnull) / bpf; + } + + result = ma_resampler_process_pcm_frames__read(pResampler, ma_offset_ptr(pFramesIn, totalInputFramesProcessed*bpf), &inputFramesToProcessThisIteration, ma_offset_ptr(devnull, totalOutputFramesProcessed*bpf), &outputFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + totalOutputFramesProcessed += outputFramesToProcessThisIteration; + totalInputFramesProcessed += inputFramesToProcessThisIteration; + } + } else { + /* Don't process input data - just update timing and filter state as if zeroes were passed in. */ + while (totalOutputFramesProcessed < totalOutputFramesToProcess) { + ma_uint64 inputFramesToProcessThisIteration = 16384; + ma_uint64 outputFramesToProcessThisIteration = (totalOutputFramesToProcess - totalOutputFramesProcessed); + if (outputFramesToProcessThisIteration > sizeof(devnull) / bpf) { + outputFramesToProcessThisIteration = sizeof(devnull) / bpf; + } + + result = ma_resampler_process_pcm_frames__read(pResampler, NULL, &inputFramesToProcessThisIteration, ma_offset_ptr(devnull, totalOutputFramesProcessed*bpf), &outputFramesToProcessThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + totalOutputFramesProcessed += outputFramesToProcessThisIteration; + totalInputFramesProcessed += inputFramesToProcessThisIteration; } } - return MA_FALSE; + + if (pFrameCountIn != NULL) { + *pFrameCountIn = totalInputFramesProcessed; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = totalOutputFramesProcessed; + } + + return MA_SUCCESS; } +#endif + +static ma_result ma_resampler_process_pcm_frames__seek(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, ma_uint64* pFrameCountOut) +{ + MA_ASSERT(pResampler != NULL); + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_resampler_process_pcm_frames__seek__linear(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut); + } break; + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return ma_resampler_process_pcm_frames__seek__speex(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut); + #else + break; + #endif + }; + default: break; + } -/************************************************************************************************************************************************************** + /* 
Should never hit this. */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_ARGS; +} -Format Conversion. -**************************************************************************************************************************************************************/ -void ma_copy_memory_64(void* dst, const void* src, ma_uint64 sizeInBytes) +ma_result ma_resampler_process_pcm_frames(ma_resampler* pResampler, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { -#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX - ma_copy_memory(dst, src, (size_t)sizeInBytes); -#else - while (sizeInBytes > 0) { - ma_uint64 bytesToCopyNow = sizeInBytes; - if (bytesToCopyNow > MA_SIZE_MAX) { - bytesToCopyNow = MA_SIZE_MAX; - } + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } - ma_copy_memory(dst, src, (size_t)bytesToCopyNow); /* Safe cast to size_t. */ + if (pFrameCountOut == NULL && pFrameCountIn == NULL) { + return MA_INVALID_ARGS; + } - sizeInBytes -= bytesToCopyNow; - dst = ( void*)(( ma_uint8*)dst + bytesToCopyNow); - src = (const void*)((const ma_uint8*)src + bytesToCopyNow); + if (pFramesOut != NULL) { + /* Reading. */ + return ma_resampler_process_pcm_frames__read(pResampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Seeking. */ + return ma_resampler_process_pcm_frames__seek(pResampler, pFramesIn, pFrameCountIn, pFrameCountOut); } -#endif } -void ma_zero_memory_64(void* dst, ma_uint64 sizeInBytes) +ma_result ma_resampler_set_rate(ma_resampler* pResampler, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) { -#if 0xFFFFFFFFFFFFFFFF <= MA_SIZE_MAX - ma_zero_memory(dst, (size_t)sizeInBytes); -#else - while (sizeInBytes > 0) { - ma_uint64 bytesToZeroNow = sizeInBytes; - if (bytesToZeroNow > MA_SIZE_MAX) { - bytesToZeroNow = MA_SIZE_MAX; - } + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } - ma_zero_memory(dst, (size_t)bytesToZeroNow); /* Safe cast to size_t. */ + if (sampleRateIn == 0 || sampleRateOut == 0) { + return MA_INVALID_ARGS; + } - sizeInBytes -= bytesToZeroNow; - dst = (void*)((ma_uint8*)dst + bytesToZeroNow); + pResampler->config.sampleRateIn = sampleRateIn; + pResampler->config.sampleRateOut = sampleRateOut; + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_set_rate(&pResampler->state.linear, sampleRateIn, sampleRateOut); + } break; + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return ma_result_from_speex_err(speex_resampler_set_rate((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, sampleRateIn, sampleRateOut)); + #else + break; + #endif + }; + + default: break; } -#endif -} + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return MA_INVALID_OPERATION; +} -/* u8 */ -void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_result ma_resampler_set_rate_ratio(ma_resampler* pResampler, float ratio) { - (void)ditherMode; - ma_copy_memory_64(dst, src, count * sizeof(ma_uint8)); + if (pResampler == NULL) { + return MA_INVALID_ARGS; + } + + if (pResampler->config.algorithm == ma_resample_algorithm_linear) { + return ma_linear_resampler_set_rate_ratio(&pResampler->state.linear, ratio); + } else { + /* Getting here means the backend does not have native support for setting the rate as a ratio so we just do it generically. */ + ma_uint32 n; + ma_uint32 d; + + d = 1000000; /* We use up to 6 decimal places. 
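+        For example, a ratio of 2.0f (sampleRateIn twice sampleRateOut) becomes n = 2000000 and d = 1000000, which is then passed to
+        ma_resampler_set_rate() below as a 2:1 sample rate pair.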
*/ + n = (ma_uint32)(ratio * d); + + if (n == 0) { + return MA_INVALID_ARGS; /* Ratio too small. */ + } + + MA_ASSERT(n != 0); + + return ma_resampler_set_rate(pResampler, n, d); + } } +ma_uint64 ma_resampler_get_required_input_frame_count(ma_resampler* pResampler, ma_uint64 outputFrameCount) +{ + if (pResampler == NULL) { + return 0; + } + + if (outputFrameCount == 0) { + return 0; + } + + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_required_input_frame_count(&pResampler->state.linear, outputFrameCount); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + ma_uint64 count; + int speexErr = ma_speex_resampler_get_required_input_frame_count((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, outputFrameCount, &count); + if (speexErr != RESAMPLER_ERR_SUCCESS) { + return 0; + } -void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_int16* dst_s16 = (ma_int16*)dst; - const ma_uint8* src_u8 = (const ma_uint8*)src; + return count; + #else + break; + #endif + } - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int16 x = src_u8[i]; - x = x - 128; - x = x << 8; - dst_s16[i] = x; + default: break; } - (void)ditherMode; -} - -void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_u8_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_u8_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s16__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_uint64 ma_resampler_get_expected_output_frame_count(ma_resampler* pResampler, ma_uint64 inputFrameCount) { - ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); -} -#endif + if (pResampler == NULL) { + return 0; /* Invalid args. 
*/ + } -void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); -#else - ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); -#endif -} + if (inputFrameCount == 0) { + return 0; + } + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_expected_output_frame_count(&pResampler->state.linear, inputFrameCount); + } -void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint8* dst_s24 = (ma_uint8*)dst; - const ma_uint8* src_u8 = (const ma_uint8*)src; + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + ma_uint64 count; + int speexErr = ma_speex_resampler_get_expected_output_frame_count((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState, inputFrameCount, &count); + if (speexErr != RESAMPLER_ERR_SUCCESS) { + return 0; + } - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int16 x = src_u8[i]; - x = x - 128; + return count; + #else + break; + #endif + } - dst_s24[i*3+0] = 0; - dst_s24[i*3+1] = 0; - dst_s24[i*3+2] = (ma_uint8)((ma_int8)x); + default: break; } - (void)ditherMode; + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; } -void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_uint64 ma_resampler_get_input_latency(ma_resampler* pResampler) { - ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); -} + if (pResampler == NULL) { + return 0; + } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_u8_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_u8_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s24__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_input_latency(&pResampler->state.linear); + } + + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return (ma_uint64)ma_speex_resampler_get_input_latency((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState); + #else + break; + #endif + } + + default: break; + } + + /* Should never get here. 
*/ + MA_ASSERT(MA_FALSE); + return 0; } -#endif -void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_uint64 ma_resampler_get_output_latency(ma_resampler* pResampler) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); -#else - ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); -#endif -} + if (pResampler == NULL) { + return 0; + } + switch (pResampler->config.algorithm) + { + case ma_resample_algorithm_linear: + { + return ma_linear_resampler_get_output_latency(&pResampler->state.linear); + } -void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_int32* dst_s32 = (ma_int32*)dst; - const ma_uint8* src_u8 = (const ma_uint8*)src; + case ma_resample_algorithm_speex: + { + #if defined(MA_HAS_SPEEX_RESAMPLER) + return (ma_uint64)ma_speex_resampler_get_output_latency((SpeexResamplerState*)pResampler->state.speex.pSpeexResamplerState); + #else + break; + #endif + } - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = src_u8[i]; - x = x - 128; - x = x << 24; - dst_s32[i] = x; + default: break; } - (void)ditherMode; + /* Should never get here. */ + MA_ASSERT(MA_FALSE); + return 0; } -void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); -} +/************************************************************************************************************************************************************** -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_u8_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); -} +Channel Conversion + +**************************************************************************************************************************************************************/ +#ifndef MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT +#define MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT 12 #endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_u8_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) + +#define MA_PLANE_LEFT 0 +#define MA_PLANE_RIGHT 1 +#define MA_PLANE_FRONT 2 +#define MA_PLANE_BACK 3 +#define MA_PLANE_BOTTOM 4 +#define MA_PLANE_TOP 5 + +float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = { + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */ + { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */ + { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */ + { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */ + { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */ + { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */ + { 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */ + { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */ + { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */ + { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */ + { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */ + { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */ + { 0.0f, 
0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */
+    { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */
+    { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */
+    { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */
+    { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */
+    { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */
+    { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_12 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_13 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_14 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_15 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_16 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_17 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_18 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_19 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_20 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_21 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_22 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_23 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_24 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_25 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_26 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_27 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_28 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_29 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_30 */
+    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_31 */
+};
+
+float ma_calculate_channel_position_rectangular_weight(ma_channel channelPositionA, ma_channel channelPositionB)
+{
+    /*
+    Imagine the following simplified example: You have a single input speaker which is the front/left speaker which you want to convert to
+    the following output configuration:
+
+     - front/left
+     - side/left
+     - back/left
+
+    The front/left output is easy - it is the same speaker position so it receives the full contribution of the front/left input. The amount
+    of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated.
+
+    Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left
+    speaker emitting half of its total volume from the front, and the other half from the left. Since part of its volume is being emitted
Since part of it's volume is being emitted + from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would + receive some amount of contribution from front/left speaker. The amount of contribution depends on how many planes are shared between + the two speakers. Note that in the examples below I've added a top/front/left speaker as an example just to show how the math works + across 3 spatial dimensions. + + The first thing to do is figure out how each speaker's volume is spread over each of plane: + - front/left: 2 planes (front and left) = 1/2 = half it's total volume on each plane + - side/left: 1 plane (left only) = 1/1 = entire volume from left plane + - back/left: 2 planes (back and left) = 1/2 = half it's total volume on each plane + - top/front/left: 3 planes (top, front and left) = 1/3 = one third it's total volume on each plane + + The amount of volume each channel contributes to each of it's planes is what controls how much it is willing to given and take to other + channels on the same plane. The volume that is willing to the given by one channel is multiplied by the volume that is willing to be + taken by the other to produce the final contribution. + */ + + /* Contribution = Sum(Volume to Give * Volume to Take) */ + float contribution = + g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] + + g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] + + g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] + + g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] + + g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] + + g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5]; + + return contribution; } -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) + +ma_channel_converter_config ma_channel_converter_config_init(ma_format format, ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode) { - ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); + ma_channel_converter_config config; + MA_ZERO_OBJECT(&config); + config.format = format; + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + ma_channel_map_copy(config.channelMapIn, channelMapIn, channelsIn); + ma_channel_map_copy(config.channelMapOut, channelMapOut, channelsOut); + config.mixingMode = mixingMode; + + return config; } -#endif -void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_int32 ma_channel_converter_float_to_fp(float x) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); -#else - ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); -#endif + return (ma_int32)(x * (1<channelsIn, pConfig->channelMapIn)) { + return MA_INVALID_ARGS; /* Invalid input channel map. */ + } + if (!ma_channel_map_valid(pConfig->channelsOut, pConfig->channelMapOut)) { + return MA_INVALID_ARGS; /* Invalid output channel map. 
*/ + } - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; - } + if (pConfig->format != ma_format_s16 && pConfig->format != ma_format_f32) { + return MA_INVALID_ARGS; /* Invalid format. */ } -} -void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_uint8* dst_u8 = (ma_uint8*)dst; - const ma_uint8** src_u8 = (const ma_uint8**)src; + pConverter->format = pConfig->format; + pConverter->channelsIn = pConfig->channelsIn; + pConverter->channelsOut = pConfig->channelsOut; + ma_channel_map_copy(pConverter->channelMapIn, pConfig->channelMapIn, pConfig->channelsIn); + ma_channel_map_copy(pConverter->channelMapOut, pConfig->channelMapOut, pConfig->channelsOut); + pConverter->mixingMode = pConfig->mixingMode; - if (channels == 1) { - ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8)); - } else if (channels == 2) { - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - dst_u8[iFrame*2 + 0] = src_u8[0][iFrame]; - dst_u8[iFrame*2 + 1] = src_u8[1][iFrame]; - } - } else { - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + if (pConverter->format == ma_format_s16) { + pConverter->weights.f32[iChannelIn][iChannelOut] = pConfig->weights[iChannelIn][iChannelOut]; + } else { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(pConfig->weights[iChannelIn][iChannelOut]); } } } -} + -void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_interleave_u8__reference(dst, src, frameCount, channels); -#else - ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels); -#endif -} + /* If the input and output channels and channel maps are the same we should use a passthrough. */ + if (pConverter->channelsIn == pConverter->channelsOut) { + if (ma_channel_map_equal(pConverter->channelsIn, pConverter->channelMapIn, pConverter->channelMapOut)) { + pConverter->isPassthrough = MA_TRUE; + } + if (ma_channel_map_blank(pConverter->channelsIn, pConverter->channelMapIn) || ma_channel_map_blank(pConverter->channelsOut, pConverter->channelMapOut)) { + pConverter->isPassthrough = MA_TRUE; + } + } -void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_uint8** dst_u8 = (ma_uint8**)dst; - const ma_uint8* src_u8 = (const ma_uint8*)src; - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel]; + /* + We can use a simple case for expanding the mono channel. This will used when expanding a mono input into any output so long + as no LFE is present in the output. + */ + if (!pConverter->isPassthrough) { + if (pConverter->channelsIn == 1 && pConverter->channelMapIn[0] == MA_CHANNEL_MONO) { + /* Optimal case if no LFE is in the output channel map. 
*/ + pConverter->isSimpleMonoExpansion = MA_TRUE; + if (ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->channelMapOut, MA_CHANNEL_LFE)) { + pConverter->isSimpleMonoExpansion = MA_FALSE; + } } } -} -void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); -} + /* Another optimized case is stereo to mono. */ + if (!pConverter->isPassthrough) { + if (pConverter->channelsOut == 1 && pConverter->channelMapOut[0] == MA_CHANNEL_MONO && pConverter->channelsIn == 2) { + /* Optimal case if no LFE is in the input channel map. */ + pConverter->isStereoToMono = MA_TRUE; + if (ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->channelMapIn, MA_CHANNEL_LFE)) { + pConverter->isStereoToMono = MA_FALSE; + } + } + } -void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); -#else - ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels); -#endif -} + /* + Here is where we do a bit of pre-processing to know how each channel should be combined to make up the output. Rules: + + 1) If it's a passthrough, do nothing - it's just a simple memcpy(). + 2) If the channel counts are the same and every channel position in the input map is present in the output map, use a + simple shuffle. An example might be different 5.1 channel layouts. + 3) Otherwise channels are blended based on spatial locality. + */ + if (!pConverter->isPassthrough) { + if (pConverter->channelsIn == pConverter->channelsOut) { + ma_bool32 areAllChannelPositionsPresent = MA_TRUE; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_bool32 isInputChannelPositionInOutput = MA_FALSE; + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + if (pConverter->channelMapIn[iChannelIn] == pConverter->channelMapOut[iChannelOut]) { + isInputChannelPositionInOutput = MA_TRUE; + break; + } + } -/* s16 */ -void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint8* dst_u8 = (ma_uint8*)dst; - const ma_int16* src_s16 = (const ma_int16*)src; + if (!isInputChannelPositionInOutput) { + areAllChannelPositionsPresent = MA_FALSE; + break; + } + } - if (ditherMode == ma_dither_mode_none) { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int16 x = src_s16[i]; - x = x >> 8; - x = x + 128; - dst_u8[i] = (ma_uint8)x; - } - } else { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int16 x = src_s16[i]; + if (areAllChannelPositionsPresent) { + pConverter->isSimpleShuffle = MA_TRUE; - /* Dither. Don't overflow. */ - ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F); - if ((x + dither) <= 0x7FFF) { - x = (ma_int16)(x + dither); - } else { - x = 0x7FFF; + /* + All the router will be doing is rearranging channels which means all we need to do is use a shuffling table which is just + a mapping between the index of the input channel to the index of the output channel. 
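+                For example (hypothetical maps), shuffling {MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_RIGHT, MA_CHANNEL_FRONT_CENTER} into
+                {MA_CHANNEL_FRONT_CENTER, MA_CHANNEL_FRONT_LEFT, MA_CHANNEL_FRONT_RIGHT} produces shuffleTable = {1, 2, 0} - input channel 0
+                (MA_CHANNEL_FRONT_LEFT) is routed to output channel 1, and so on.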
+ */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + if (pConverter->channelMapIn[iChannelIn] == pConverter->channelMapOut[iChannelOut]) { + pConverter->shuffleTable[iChannelIn] = (ma_uint8)iChannelOut; + break; + } + } + } } - - x = x >> 8; - x = x + 128; - dst_u8[i] = (ma_uint8)x; } } -} -void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); -} -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s16_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s16_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_u8__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); -} -#endif + /* + Here is where weights are calculated. Note that we calculate the weights at all times, even when using a passthrough and simple + shuffling. We use different algorithms for calculating weights depending on our mixing mode. + + In simple mode we don't do any blending (except for converting between mono, which is done in a later step). Instead we just + map 1:1 matching channels. In this mode, if no channels in the input channel map correspond to anything in the output channel + map, nothing will be heard! + */ -void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); -#else - ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); -#endif -} + /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; -void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - (void)ditherMode; - ma_copy_memory_64(dst, src, count * sizeof(ma_int16)); -} + if (channelPosIn == channelPosOut) { + if (pConverter->format == ma_format_s16) { + pConverter->weights.s16[iChannelIn][iChannelOut] = (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT); + } else { + pConverter->weights.f32[iChannelIn][iChannelOut] = 1; + } + } + } + } + /* + The mono channel is accumulated on all other channels, except LFE. Make sure in this loop we exclude output mono channels since + they were handled in the pass above. 
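+    For example, a single MA_CHANNEL_MONO input being expanded to a stereo output ends up with a weight of 1 (1.0f, or its fixed-point
+    equivalent for s16) against both MA_CHANNEL_FRONT_LEFT and MA_CHANNEL_FRONT_RIGHT, while an MA_CHANNEL_LFE output receives nothing
+    from the mono channel.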
+ */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; -void ma_pcm_s16_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint8* dst_s24 = (ma_uint8*)dst; - const ma_int16* src_s16 = (const ma_int16*)src; + if (channelPosIn == MA_CHANNEL_MONO) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; - ma_uint64 i; - for (i = 0; i < count; i += 1) { - dst_s24[i*3+0] = 0; - dst_s24[i*3+1] = (ma_uint8)(src_s16[i] & 0xFF); - dst_s24[i*3+2] = (ma_uint8)(src_s16[i] >> 8); + if (channelPosOut != MA_CHANNEL_NONE && channelPosOut != MA_CHANNEL_MONO && channelPosOut != MA_CHANNEL_LFE) { + if (pConverter->format == ma_format_s16) { + pConverter->weights.s16[iChannelIn][iChannelOut] = (1 << MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT); + } else { + pConverter->weights.f32[iChannelIn][iChannelOut] = 1; + } + } + } + } } - (void)ditherMode; -} - -void ma_pcm_s16_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); -} + /* The output mono channel is the average of all non-none, non-mono and non-lfe input channels. */ + { + ma_uint32 len = 0; + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s16_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s16_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s16_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s24__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s16_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); -} -#endif + if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) { + len += 1; + } + } -void ma_pcm_s16_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); -#else - ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); -#endif -} + if (len > 0) { + float monoWeight = 1.0f / len; + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; -void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_int32* dst_s32 = (ma_int32*)dst; - const ma_int16* src_s16 = (const ma_int16*)src; + if (channelPosOut == MA_CHANNEL_MONO) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; - ma_uint64 i; - for (i = 0; i < count; i += 1) { - dst_s32[i] = src_s16[i] << 16; + if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) { + if (pConverter->format == 
ma_format_s16) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(monoWeight); + } else { + pConverter->weights.f32[iChannelIn][iChannelOut] = monoWeight; + } + } + } + } + } + } } - (void)ditherMode; -} -void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); -} + /* Input and output channels that are not present on the other side need to be blended in based on spatial locality. */ + switch (pConverter->mixingMode) + { + case ma_channel_mix_mode_rectangular: + { + /* Unmapped input channels. */ + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s16_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s16_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s32__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); -} -#endif + if (ma_is_spatial_channel_position(channelPosIn)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsOut, pConverter->channelMapOut, channelPosIn)) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; -void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); -#else - ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); -#endif -} + if (ma_is_spatial_channel_position(channelPosOut)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } + /* Only apply the weight if we haven't already got some contribution from the respective channels. */ + if (pConverter->format == ma_format_s16) { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(weight); + } + } else { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } + } + } + } + } + } -void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - float* dst_f32 = (float*)dst; - const ma_int16* src_s16 = (const ma_int16*)src; + /* Unmapped output channels. 
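               These are output channel positions that don't appear anywhere in the input channel map. They are
               given a weighted contribution from spatially related input channels, mirroring the unmapped-input
               case handled above.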
*/ + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_channel channelPosOut = pConverter->channelMapOut[iChannelOut]; - ma_uint64 i; - for (i = 0; i < count; i += 1) { - float x = (float)src_s16[i]; + if (ma_is_spatial_channel_position(channelPosOut)) { + if (!ma_channel_map_contains_channel_position(pConverter->channelsIn, pConverter->channelMapIn, channelPosOut)) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + ma_channel channelPosIn = pConverter->channelMapIn[iChannelIn]; -#if 0 - /* The accurate way. */ - x = x + 32768.0f; /* -32768..32767 to 0..65535 */ - x = x * 0.00003051804379339284f; /* 0..65536 to 0..2 */ - x = x - 1; /* 0..2 to -1..1 */ -#else - /* The fast way. */ - x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */ -#endif + if (ma_is_spatial_channel_position(channelPosIn)) { + float weight = 0; + if (pConverter->mixingMode == ma_channel_mix_mode_rectangular) { + weight = ma_calculate_channel_position_rectangular_weight(channelPosIn, channelPosOut); + } - dst_f32[i] = x; + /* Only apply the weight if we haven't already got some contribution from the respective channels. */ + if (pConverter->format == ma_format_s16) { + if (pConverter->weights.s16[iChannelIn][iChannelOut] == 0) { + pConverter->weights.s16[iChannelIn][iChannelOut] = ma_channel_converter_float_to_fp(weight); + } + } else { + if (pConverter->weights.f32[iChannelIn][iChannelOut] == 0) { + pConverter->weights.f32[iChannelIn][iChannelOut] = weight; + } + } + } + } + } + } + } + } break; + + case ma_channel_mix_mode_custom_weights: + case ma_channel_mix_mode_simple: + default: + { + /* Fallthrough. */ + } break; } - (void)ditherMode; -} -void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); + return MA_SUCCESS; } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s16_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +void ma_channel_converter_uninit(ma_channel_converter* pConverter) { - ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s16_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s16_to_f32__avx2(dst, src, count, ditherMode); + if (pConverter == NULL) { + return; + } } -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) + +static ma_result ma_channel_converter_process_pcm_frames__passthrough(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) { - ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + return MA_SUCCESS; } -#endif -void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_channel_converter_process_pcm_frames__simple_shuffle(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) { -#ifdef 
MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); -#else - ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); -#endif -} + ma_uint32 iFrame; + ma_uint32 iChannelIn; + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsIn == pConverter->channelsOut); -void ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_int16* dst_s16 = (ma_int16*)dst; - const ma_int16** src_s16 = (const ma_int16**)src; + if (pConverter->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame]; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + pFramesOutS16[pConverter->shuffleTable[iChannelIn]] = pFramesInS16[iChannelIn]; + } + } + } else { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + pFramesOutF32[pConverter->shuffleTable[iChannelIn]] = pFramesInF32[iChannelIn]; + } } } -} -void ma_pcm_interleave_s16__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); + return MA_SUCCESS; } -void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +static ma_result ma_channel_converter_process_pcm_frames__simple_mono_expansion(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); -#else - ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels); -#endif -} + ma_uint64 iFrame; + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); -void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_int16** dst_s16 = (ma_int16**)dst; - const ma_int16* src_s16 = (const ma_int16*)src; + if (pConverter->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel]; + if (pConverter->channelsOut == 2) { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutS16[iFrame*2 + 0] = pFramesInS16[iFrame]; + pFramesOutS16[iFrame*2 + 1] = pFramesInS16[iFrame]; + } + } else { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutS16[iFrame*pConverter->channelsOut + iChannel] = pFramesInS16[iFrame]; + } + } } - } -} - -void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_pcm_deinterleave_s16__reference(dst, src, 
frameCount, channels); -} + } else { + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; -void ma_pcm_deinterleave_s16(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); -#else - ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels); -#endif -} + if (pConverter->channelsOut == 2) { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutF32[iFrame*2 + 0] = pFramesInF32[iFrame]; + pFramesOutF32[iFrame*2 + 1] = pFramesInF32[iFrame]; + } + } else { + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < pConverter->channelsOut; iChannel += 1) { + pFramesOutF32[iFrame*pConverter->channelsOut + iChannel] = pFramesInF32[iFrame]; + } + } + } + } + return MA_SUCCESS; +} -/* s24 */ -void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_channel_converter_process_pcm_frames__stereo_to_mono(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) { - ma_uint8* dst_u8 = (ma_uint8*)dst; - const ma_uint8* src_s24 = (const ma_uint8*)src; + ma_uint64 iFrame; - if (ditherMode == ma_dither_mode_none) { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int8 x = (ma_int8)src_s24[i*3 + 2] + 128; - dst_u8[i] = (ma_uint8)x; + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + MA_ASSERT(pConverter->channelsIn == 2); + MA_ASSERT(pConverter->channelsOut == 1); + + if (pConverter->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutS16[iFrame] = (ma_int16)(((ma_int32)pFramesInS16[iFrame*2+0] + (ma_int32)pFramesInS16[iFrame*2+1]) / 2); } } else { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; - /* Dither. Don't overflow. 
*/ - ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); - if ((ma_int64)x + dither <= 0x7FFFFFFF) { - x = x + dither; - } else { - x = 0x7FFFFFFF; - } - - x = x >> 24; - x = x + 128; - dst_u8[i] = (ma_uint8)x; + for (iFrame = 0; iFrame < frameCount; ++iFrame) { + pFramesOutF32[iFrame] = (pFramesInF32[iFrame*2+0] + pFramesInF32[iFrame*2+0]) * 0.5f; } } -} -void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); + return MA_SUCCESS; } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s24_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s24_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_u8__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_channel_converter_process_pcm_frames__weights(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) { - ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); -} -#endif + ma_uint32 iFrame; + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; -void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); -#else - ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); -#endif -} + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pFramesIn != NULL); + /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */ -void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_int16* dst_s16 = (ma_int16*)dst; - const ma_uint8* src_s24 = (const ma_uint8*)src; + /* Clear. */ + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); - if (ditherMode == ma_dither_mode_none) { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]); - ma_uint16 dst_hi = ((ma_uint16)src_s24[i*3 + 2]) << 8; - dst_s16[i] = (ma_int16)dst_lo | dst_hi; + /* Accumulate. 
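       Conceptually, each output sample is a weighted sum of the input samples of the same frame:

           out[iChannelOut] += in[iChannelIn] * weights[iChannelIn][iChannelOut]

       summed over every input channel. In the s16 path the weights are stored in fixed point, so each product is
       shifted back down by MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT and the running total is clamped to [-32768, 32767].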
*/ + if (pConverter->format == ma_format_s16) { + /* */ ma_int16* pFramesOutS16 = ( ma_int16*)pFramesOut; + const ma_int16* pFramesInS16 = (const ma_int16*)pFramesIn; + + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + ma_int32 s = pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut]; + s += (pFramesInS16[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.s16[iChannelIn][iChannelOut]) >> MA_CHANNEL_CONVERTER_FIXED_POINT_SHIFT; + + pFramesOutS16[iFrame*pConverter->channelsOut + iChannelOut] = (ma_int16)ma_clamp(s, -32768, 32767); + } + } } } else { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + /* */ float* pFramesOutF32 = ( float*)pFramesOut; + const float* pFramesInF32 = (const float*)pFramesIn; - /* Dither. Don't overflow. */ - ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); - if ((ma_int64)x + dither <= 0x7FFFFFFF) { - x = x + dither; - } else { - x = 0x7FFFFFFF; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + for (iChannelIn = 0; iChannelIn < pConverter->channelsIn; ++iChannelIn) { + for (iChannelOut = 0; iChannelOut < pConverter->channelsOut; ++iChannelOut) { + pFramesOutF32[iFrame*pConverter->channelsOut + iChannelOut] += pFramesInF32[iFrame*pConverter->channelsIn + iChannelIn] * pConverter->weights.f32[iChannelIn][iChannelOut]; + } } - - x = x >> 16; - dst_s16[i] = (ma_int16)x; } } + + return MA_SUCCESS; } -void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_result ma_channel_converter_process_pcm_frames(ma_channel_converter* pConverter, void* pFramesOut, const void* pFramesIn, ma_uint64 frameCount) { - ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); -} + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s24_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s24_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s16__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); -} -#endif + if (pFramesOut == NULL) { + return MA_INVALID_ARGS; + } -void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); -#else - ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); -#endif -} + if (pFramesIn == NULL) { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->format, pConverter->channelsOut)); + return MA_SUCCESS; + } + if (pConverter->isPassthrough) { + return ma_channel_converter_process_pcm_frames__passthrough(pConverter, pFramesOut, pFramesIn, frameCount); + } else if 
(pConverter->isSimpleShuffle) { + return ma_channel_converter_process_pcm_frames__simple_shuffle(pConverter, pFramesOut, pFramesIn, frameCount); + } else if (pConverter->isSimpleMonoExpansion) { + return ma_channel_converter_process_pcm_frames__simple_mono_expansion(pConverter, pFramesOut, pFramesIn, frameCount); + } else if (pConverter->isStereoToMono) { + return ma_channel_converter_process_pcm_frames__stereo_to_mono(pConverter, pFramesOut, pFramesIn, frameCount); + } else { + return ma_channel_converter_process_pcm_frames__weights(pConverter, pFramesOut, pFramesIn, frameCount); + } +} -void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - (void)ditherMode; - ma_copy_memory_64(dst, src, count * 3); -} +/************************************************************************************************************************************************************** +Data Conversion -void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +**************************************************************************************************************************************************************/ +ma_data_converter_config ma_data_converter_config_init_default() { - ma_int32* dst_s32 = (ma_int32*)dst; - const ma_uint8* src_s24 = (const ma_uint8*)src; + ma_data_converter_config config; + MA_ZERO_OBJECT(&config); - ma_uint64 i; - for (i = 0; i < count; i += 1) { - dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); - } + config.ditherMode = ma_dither_mode_none; + config.resampling.algorithm = ma_resample_algorithm_linear; + config.resampling.allowDynamicSampleRate = MA_FALSE; /* Disable dynamic sample rates by default because dynamic rate adjustments should be quite rare and it allows an optimization for cases when the in and out sample rates are the same. */ - (void)ditherMode; -} + /* Linear resampling defaults. */ + config.resampling.linear.lpfCount = 1; + config.resampling.linear.lpfNyquistFactor = 1; -void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); -} + /* Speex resampling defaults. 
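       The Speex resampler's quality setting typically ranges from 0 (fastest) to 10 (highest quality); 3 is a
       balanced default.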
*/ + config.resampling.speex.quality = 3; -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s24_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s24_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s32__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); + return config; } -#endif -void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_data_converter_config ma_data_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channelsIn, ma_uint32 channelsOut, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); -#else - ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); -#endif + ma_data_converter_config config = ma_data_converter_config_init_default(); + config.formatIn = formatIn; + config.formatOut = formatOut; + config.channelsIn = channelsIn; + config.channelsOut = channelsOut; + config.sampleRateIn = sampleRateIn; + config.sampleRateOut = sampleRateOut; + + return config; } - -void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +ma_result ma_data_converter_init(const ma_data_converter_config* pConfig, ma_data_converter* pConverter) { - float* dst_f32 = (float*)dst; - const ma_uint8* src_s24 = (const ma_uint8*)src; + ma_result result; + ma_format midFormat; - ma_uint64 i; - for (i = 0; i < count; i += 1) { - float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8); + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } -#if 0 - /* The accurate way. */ - x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */ - x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */ - x = x - 1; /* 0..2 to -1..1 */ -#else - /* The fast way. */ - x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */ -#endif + MA_ZERO_OBJECT(pConverter); - dst_f32[i] = x; + if (pConfig == NULL) { + return MA_INVALID_ARGS; } - (void)ditherMode; -} + pConverter->config = *pConfig; -void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); -} + /* + We want to avoid as much data conversion as possible. The channel converter and resampler both support s16 and f32 natively. We need to decide + on the format to use for this stage. We call this the mid format because it's used in the middle stage of the conversion pipeline. If the output + format is either s16 or f32 we use that one. If that is not the case it will do the same thing for the input format. If it's neither we just + use f32. 
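    For example, with this rule u8 -> s16 picks s16 (the output format), f32 -> u8 picks f32 (the input format),
    and u8 -> s24 falls back to f32 because neither endpoint is s16 or f32.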
+ */ + /* */ if (pConverter->config.formatOut == ma_format_s16 || pConverter->config.formatOut == ma_format_f32) { + midFormat = pConverter->config.formatOut; + } else if (pConverter->config.formatIn == ma_format_s16 || pConverter->config.formatIn == ma_format_f32) { + midFormat = pConverter->config.formatIn; + } else { + midFormat = ma_format_f32; + } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s24_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s24_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_f32__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); -} -#endif + if (pConverter->config.formatIn != midFormat) { + pConverter->hasPreFormatConversion = MA_TRUE; + } + if (pConverter->config.formatOut != midFormat) { + pConverter->hasPostFormatConversion = MA_TRUE; + } -void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); -#else - ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); -#endif -} + /* Channel converter. We always initialize this, but we check if it configures itself as a passthrough to determine whether or not it's needed. */ + { + ma_uint32 iChannelIn; + ma_uint32 iChannelOut; + ma_channel_converter_config channelConverterConfig; -void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_uint8* dst8 = (ma_uint8*)dst; - const ma_uint8** src8 = (const ma_uint8**)src; + channelConverterConfig = ma_channel_converter_config_init(midFormat, pConverter->config.channelsIn, pConverter->config.channelMapIn, pConverter->config.channelsOut, pConverter->config.channelMapOut, pConverter->config.channelMixMode); + + /* Channel weights. */ + for (iChannelIn = 0; iChannelIn < pConverter->config.channelsIn; iChannelIn += 1) { + for (iChannelOut = 0; iChannelOut < pConverter->config.channelsOut; iChannelOut += 1) { + channelConverterConfig.weights[iChannelIn][iChannelOut] = pConverter->config.channelWeights[iChannelIn][iChannelOut]; + } + } + + result = ma_channel_converter_init(&channelConverterConfig, &pConverter->channelConverter); + if (result != MA_SUCCESS) { + return result; + } - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0]; - dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1]; - dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2]; + /* If the channel converter is not a passthrough we need to enable it. Otherwise we can skip it. 
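       The channel converter reports itself as a passthrough when the input and output channel counts and channel
       maps come out identical, in which case the whole channel conversion stage can be skipped.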
*/ + if (pConverter->channelConverter.isPassthrough == MA_FALSE) { + pConverter->hasChannelConverter = MA_TRUE; } } -} -void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); -} -void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); -#else - ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels); -#endif -} + /* Always enable dynamic sample rates if the input sample rate is different because we're always going to need a resampler in this case anyway. */ + if (pConverter->config.resampling.allowDynamicSampleRate == MA_FALSE) { + pConverter->config.resampling.allowDynamicSampleRate = pConverter->config.sampleRateIn != pConverter->config.sampleRateOut; + } + /* Resampler. */ + if (pConverter->config.resampling.allowDynamicSampleRate) { + ma_resampler_config resamplerConfig; + ma_uint32 resamplerChannels; -void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_uint8** dst8 = (ma_uint8**)dst; - const ma_uint8* src8 = (const ma_uint8*)src; + /* The resampler is the most expensive part of the conversion process, so we need to do it at the stage where the channel count is at it's lowest. */ + if (pConverter->config.channelsIn < pConverter->config.channelsOut) { + resamplerChannels = pConverter->config.channelsIn; + } else { + resamplerChannels = pConverter->config.channelsOut; + } - ma_uint32 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0]; - dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1]; - dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2]; + resamplerConfig = ma_resampler_config_init(midFormat, resamplerChannels, pConverter->config.sampleRateIn, pConverter->config.sampleRateOut, pConverter->config.resampling.algorithm); + resamplerConfig.linear.lpfCount = pConverter->config.resampling.linear.lpfCount; + resamplerConfig.linear.lpfNyquistFactor = pConverter->config.resampling.linear.lpfNyquistFactor; + resamplerConfig.speex.quality = pConverter->config.resampling.speex.quality; + + result = ma_resampler_init(&resamplerConfig, &pConverter->resampler); + if (result != MA_SUCCESS) { + return result; } + + pConverter->hasResampler = MA_TRUE; } + + /* We can enable passthrough optimizations if applicable. Note that we'll only be able to do this if the sample rate is static. 
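       When every stage is disabled like this, ma_data_converter_process_pcm_frames() reduces to a straight copy.
       A minimal sketch of a config that should end up as a passthrough, using the config/init functions defined
       above (variable names are illustrative only):

           ma_data_converter_config cfg = ma_data_converter_config_init(ma_format_s16, ma_format_s16, 2, 2, 48000, 48000);
           ma_data_converter converter;
           ma_result result = ma_data_converter_init(&cfg, &converter);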
*/ + if (pConverter->hasPreFormatConversion == MA_FALSE && + pConverter->hasPostFormatConversion == MA_FALSE && + pConverter->hasChannelConverter == MA_FALSE && + pConverter->hasResampler == MA_FALSE) { + pConverter->isPassthrough = MA_TRUE; + } + + return MA_SUCCESS; } -void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +void ma_data_converter_uninit(ma_data_converter* pConverter) { - ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); + if (pConverter == NULL) { + return; + } + + if (pConverter->hasResampler) { + ma_resampler_uninit(&pConverter->resampler); + } } -void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +static ma_result ma_data_converter_process_pcm_frames__passthrough(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); -#else - ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels); -#endif -} + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } -/* s32 */ -void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint8* dst_u8 = (ma_uint8*)dst; - const ma_int32* src_s32 = (const ma_int32*)src; + frameCount = ma_min(frameCountIn, frameCountOut); - if (ditherMode == ma_dither_mode_none) { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = src_s32[i]; - x = x >> 24; - x = x + 128; - dst_u8[i] = (ma_uint8)x; + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_copy_memory_64(pFramesOut, pFramesIn, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); } - } else { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = src_s32[i]; + } - /* Dither. Don't overflow. 
*/ - ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); - if ((ma_int64)x + dither <= 0x7FFFFFFF) { - x = x + dither; - } else { - x = 0x7FFFFFFF; - } - - x = x >> 24; - x = x + 128; - dst_u8[i] = (ma_uint8)x; - } + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; } -} -void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); + return MA_SUCCESS; } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s32_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_data_converter_process_pcm_frames__format_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - ma_pcm_s32_to_u8__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); -} -#endif + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; -void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); -#else - ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); -#endif -} + MA_ASSERT(pConverter != NULL); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } -void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_int16* dst_s16 = (ma_int16*)dst; - const ma_int32* src_s32 = (const ma_int32*)src; + frameCount = ma_min(frameCountIn, frameCountOut); - if (ditherMode == ma_dither_mode_none) { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = src_s32[i]; - x = x >> 16; - dst_s16[i] = (ma_int16)x; + if (pFramesOut != NULL) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pFramesOut, pConverter->config.formatOut, pFramesIn, pConverter->config.formatIn, frameCount, pConverter->config.channelsIn, pConverter->config.ditherMode); + } else { + ma_zero_memory_64(pFramesOut, frameCount * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); } - } else { - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_int32 x = src_s32[i]; + } - /* Dither. Don't overflow. 
*/ - ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); - if ((ma_int64)x + dither <= 0x7FFFFFFF) { - x = x + dither; - } else { - x = 0x7FFFFFFF; - } - - x = x >> 16; - dst_s16[i] = (ma_int16)x; - } + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; } -} -void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); + return MA_SUCCESS; } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s32_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s16__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); -} -#endif -void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_data_converter_process_pcm_frames__resample_with_format_conversion(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); -#else - ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); -#endif -} + ma_result result = MA_SUCCESS; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + MA_ASSERT(pConverter != NULL); -void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint8* dst_s24 = (ma_uint8*)dst; - const ma_int32* src_s32 = (const ma_int32*)src; + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } - ma_uint64 i; - for (i = 0; i < count; i += 1) { - ma_uint32 x = (ma_uint32)src_s32[i]; - dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8); - dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16); - dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24); + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; } - (void)ditherMode; /* No dithering for s32 -> s24. 
*/ -} + framesProcessedIn = 0; + framesProcessedOut = 0; -void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); -} + while (framesProcessedOut < frameCountOut) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s32_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s24__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); -} -#endif + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } else { + pFramesInThisIteration = NULL; + } -void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); -#else - ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); -#endif -} + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } + /* Do a pre format conversion if necessary. 
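           The converted input is staged in pTempBufferIn, a MA_DATA_CONVERTER_STACK_BUFFER_SIZE byte stack buffer,
           so the per-iteration frame counts below are clamped to what that buffer (and, when a post conversion is
           also needed, pTempBufferOut) can hold.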
*/ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); -void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - (void)ditherMode; + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } - ma_copy_memory_64(dst, src, count * sizeof(ma_int32)); -} + if (pConverter->hasPostFormatConversion) { + if (frameCountInThisIteration > tempBufferOutCap) { + frameCountInThisIteration = tempBufferOutCap; + } + } + + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.config.format, pFramesInThisIteration, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferIn, &frameCountInThisIteration, pFramesOutThisIteration, &frameCountOutThisIteration); + } + + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. */ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); + + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesInThisIteration, &frameCountInThisIteration, pTempBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + break; + } + } + /* If we are doing a post format conversion we need to do that now. 
*/ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->config.formatOut, pTempBufferOut, pConverter->resampler.config.format, frameCountOutThisIteration, pConverter->resampler.config.channels, pConverter->config.ditherMode); + } + } -void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - float* dst_f32 = (float*)dst; - const ma_int32* src_s32 = (const ma_int32*)src; + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; - ma_uint64 i; - for (i = 0; i < count; i += 1) { - double x = src_s32[i]; + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); -#if 0 - x = x + 2147483648.0; - x = x * 0.0000000004656612873077392578125; - x = x - 1; -#else - x = x / 2147483648.0; -#endif + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. */ + } + } - dst_f32[i] = (float)x; + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; } - (void)ditherMode; /* No dithering for s32 -> f32. */ + return result; } -void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_data_converter_process_pcm_frames__resample_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); -} + MA_ASSERT(pConverter != NULL); -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_s32_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_s32_to_f32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_f32__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* Neither pre- nor post-format required. This is simple case where only resampling is required. */ + return ma_resampler_process_pcm_frames(&pConverter->resampler, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Format conversion required. 
*/ + return ma_data_converter_process_pcm_frames__resample_with_format_conversion(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } } -#endif -void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_data_converter_process_pcm_frames__channels_only(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); -#else - ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); -#endif -} + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 frameCount; + MA_ASSERT(pConverter != NULL); -void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_int32* dst_s32 = (ma_int32*)dst; - const ma_int32** src_s32 = (const ma_int32**)src; + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame]; - } + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; } -} -void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); -} + frameCount = ma_min(frameCountIn, frameCountOut); -void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); -#else - ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels); -#endif -} + if (pConverter->hasPreFormatConversion == MA_FALSE && pConverter->hasPostFormatConversion == MA_FALSE) { + /* No format conversion required. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOut, pFramesIn, frameCount); + if (result != MA_SUCCESS) { + return result; + } + } else { + /* Format conversion required. */ + ma_uint64 framesProcessed = 0; + + while (framesProcessed < frameCount) { + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + const void* pFramesInThisIteration; + /* */ void* pFramesOutThisIteration; + ma_uint64 frameCountThisIteration; + + if (pFramesIn != NULL) { + pFramesInThisIteration = ma_offset_ptr(pFramesIn, framesProcessed * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } else { + pFramesInThisIteration = NULL; + } + if (pFramesOut != NULL) { + pFramesOutThisIteration = ma_offset_ptr(pFramesOut, framesProcessed * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } else { + pFramesOutThisIteration = NULL; + } -void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_int32** dst_s32 = (ma_int32**)dst; - const ma_int32* src_s32 = (const ma_int32*)src; + /* Do a pre format conversion if necessary. 
*/ + if (pConverter->hasPreFormatConversion) { + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; + const ma_uint32 tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; - for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel]; - } - } -} + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferInCap) { + frameCountThisIteration = tempBufferInCap; + } -void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ - ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); -} + if (pConverter->hasPostFormatConversion) { + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } + } -void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) -{ -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); -#else - ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels); -#endif -} + if (pFramesInThisIteration != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pFramesInThisIteration, pConverter->config.formatIn, frameCountThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + } else { + MA_ZERO_MEMORY(pTempBufferIn, sizeof(pTempBufferIn)); + } + if (pConverter->hasPostFormatConversion) { + /* Both input and output conversion required. Output to the temp buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pTempBufferIn, frameCountThisIteration); + } else { + /* Only pre-format required. Output straight to the output buffer. */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pFramesOutThisIteration, pTempBufferIn, frameCountThisIteration); + } -/* f32 */ -void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint64 i; + if (result != MA_SUCCESS) { + break; + } + } else { + /* No pre-format required. Just read straight from the input buffer. */ + MA_ASSERT(pConverter->hasPostFormatConversion == MA_TRUE); - ma_uint8* dst_u8 = (ma_uint8*)dst; - const float* src_f32 = (const float*)src; + frameCountThisIteration = (frameCount - framesProcessed); + if (frameCountThisIteration > tempBufferOutCap) { + frameCountThisIteration = tempBufferOutCap; + } - float ditherMin = 0; - float ditherMax = 0; - if (ditherMode != ma_dither_mode_none) { - ditherMin = 1.0f / -128; - ditherMax = 1.0f / 127; - } + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferOut, pFramesInThisIteration, frameCountThisIteration); + if (result != MA_SUCCESS) { + break; + } + } - for (i = 0; i < count; i += 1) { - float x = src_f32[i]; - x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); - x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - x = x + 1; /* -1..1 to 0..2 */ - x = x * 127.5f; /* 0..2 to 0..255 */ + /* If we are doing a post format conversion we need to do that now. 
*/ + if (pConverter->hasPostFormatConversion) { + if (pFramesOutThisIteration != NULL) { + ma_convert_pcm_frames_format(pFramesOutThisIteration, pConverter->config.formatOut, pTempBufferOut, pConverter->channelConverter.format, frameCountThisIteration, pConverter->channelConverter.channelsOut, pConverter->config.ditherMode); + } + } - dst_u8[i] = (ma_uint8)x; + framesProcessed += frameCountThisIteration; + } } -} -void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); -} + if (pFrameCountIn != NULL) { + *pFrameCountIn = frameCount; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = frameCount; + } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_f32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_f32_to_u8__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_f32_to_u8__avx2(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); + return MA_SUCCESS; } -#endif -void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_data_converter_process_pcm_frames__resampling_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { -#ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); -#else - ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); -#endif -} + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. 
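       In the resampling-first path the data flows: caller input (formatIn) -> optional pre conversion into
       pTempBufferIn -> resampler into pTempBufferMid -> channel converter into pTempBufferOut (or straight into
       the caller's buffer) -> optional post conversion to formatOut.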
*/ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.config.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.config.channels == pConverter->channelConverter.channelsIn); + MA_ASSERT(pConverter->resampler.config.channels < pConverter->channelConverter.channelsOut); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; + } + framesProcessedIn = 0; + framesProcessedOut = 0; -void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint64 i; + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); - ma_int16* dst_s16 = (ma_int16*)dst; - const float* src_f32 = (const float*)src; + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pResampleBufferIn; + void* pChannelsBufferOut; - float ditherMin = 0; - float ditherMax = 0; - if (ditherMode != ma_dither_mode_none) { - ditherMin = 1.0f / -32768; - ditherMax = 1.0f / 32767; - } + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } - for (i = 0; i < count; i += 1) { - float x = src_f32[i]; - x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); - x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + /* Run input data through the resampler and output it to the temporary buffer. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); -#if 0 - /* The accurate way. */ - x = x + 1; /* -1..1 to 0..2 */ - x = x * 32767.5f; /* 0..2 to 0..65535 */ - x = x - 32768.0f; /* 0...65535 to -32768..32767 */ -#else - /* The fast way. */ - x = x * 32767.0f; /* -1..1 to -32767..32767 */ -#endif + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + } - dst_s16[i] = (ma_int16)x; - } -} + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } -void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint64 i; - ma_uint64 i4; - ma_uint64 count4; + /* We can't read more frames than can fit in the output buffer. */ + if (pConverter->hasPostFormatConversion) { + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + } - ma_int16* dst_s16 = (ma_int16*)dst; - const float* src_f32 = (const float*)src; + /* We need to ensure we don't try to process too many input frames that we run out of room in the output buffer. 
If this happens we'll end up glitching. */ + { + ma_uint64 requiredInputFrameCount = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration); + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } - float ditherMin = 0; - float ditherMax = 0; - if (ditherMode != ma_dither_mode_none) { - ditherMin = 1.0f / -32768; - ditherMax = 1.0f / 32767; - } + if (pConverter->hasPreFormatConversion) { + if (pFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->resampler.config.format, pRunningFramesIn, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + pResampleBufferIn = pTempBufferIn; + } else { + pResampleBufferIn = NULL; + } + } else { + pResampleBufferIn = pRunningFramesIn; + } - /* Unrolled. */ - i = 0; - count4 = count >> 2; - for (i4 = 0; i4 < count4; i4 += 1) { - float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax); - float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax); - float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax); - float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax); - - float x0 = src_f32[i+0]; - float x1 = src_f32[i+1]; - float x2 = src_f32[i+2]; - float x3 = src_f32[i+3]; + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pResampleBufferIn, &frameCountInThisIteration, pTempBufferMid, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + + /* + The input data has been resampled so now we need to run it through the channel converter. The input data is always contained in pTempBufferMid. We only need to do + this part if we have an output buffer. + */ + if (pFramesOut != NULL) { + if (pConverter->hasPostFormatConversion) { + pChannelsBufferOut = pTempBufferOut; + } else { + pChannelsBufferOut = pRunningFramesOut; + } - x0 = x0 + d0; - x1 = x1 + d1; - x2 = x2 + d2; - x3 = x3 + d3; + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pChannelsBufferOut, pTempBufferMid, frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } - x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); - x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); - x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); - x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3)); + /* Finally we do post format conversion. */ + if (pConverter->hasPostFormatConversion) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->config.formatOut, pChannelsBufferOut, pConverter->channelConverter.format, frameCountOutThisIteration, pConverter->channelConverter.channelsOut, pConverter->config.ditherMode); + } + } - x0 = x0 * 32767.0f; - x1 = x1 * 32767.0f; - x2 = x2 * 32767.0f; - x3 = x3 * 32767.0f; - dst_s16[i+0] = (ma_int16)x0; - dst_s16[i+1] = (ma_int16)x1; - dst_s16[i+2] = (ma_int16)x2; - dst_s16[i+3] = (ma_int16)x3; + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; - i += 4; - } + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); - /* Leftover. */ - for (; i < count; i += 1) { - float x = src_f32[i]; - x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); - x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - x = x * 32767.0f; /* -1..1 to -32767..32767 */ + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. 
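Each pTempBuffer* above is a fixed MA_DATA_CONVERTER_STACK_BUFFER_SIZE byte array whose capacity in frames depends on the format and channel count it is being viewed as, which is why the three tempBuffer*Cap values are computed separately and every per-iteration frame count is clamped against them. A minimal sketch of the same capacity-and-clamp pattern in isolation (the buffer size and frame total below are made-up illustrative values, not taken from the patch):

    ma_uint8  temp[4096];                      /* stand-in for a fixed scratch buffer */
    ma_uint64 capInFrames = sizeof(temp) / ma_get_bytes_per_frame(ma_format_f32, 2);

    ma_uint64 framesRemaining = 100000;        /* hypothetical total workload */
    while (framesRemaining > 0) {
        ma_uint64 framesThisIteration = framesRemaining;
        if (framesThisIteration > capInFrames) {
            framesThisIteration = capInFrames; /* never exceed what the scratch buffer can hold */
        }

        /* ... process framesThisIteration frames through temp here ... */

        framesRemaining -= framesThisIteration;
    }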
*/ + } + } - dst_s16[i] = (ma_int16)x; + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; } + + return MA_SUCCESS; } -#if defined(MA_SUPPORT_SSE2) -void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static ma_result ma_data_converter_process_pcm_frames__channels_first(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) { - ma_uint64 i; - ma_uint64 i8; - ma_uint64 count8; - ma_int16* dst_s16; - const float* src_f32; - float ditherMin; - float ditherMax; - - /* Both the input and output buffers need to be aligned to 16 bytes. */ - if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { - ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); - return; + ma_result result; + ma_uint64 frameCountIn; + ma_uint64 frameCountOut; + ma_uint64 framesProcessedIn; + ma_uint64 framesProcessedOut; + ma_uint8 pTempBufferIn[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format. */ + ma_uint64 tempBufferInCap; + ma_uint8 pTempBufferMid[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In resampler format, channel converter input format. */ + ma_uint64 tempBufferMidCap; + ma_uint8 pTempBufferOut[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In channel converter output format. */ + ma_uint64 tempBufferOutCap; + + MA_ASSERT(pConverter != NULL); + MA_ASSERT(pConverter->resampler.config.format == pConverter->channelConverter.format); + MA_ASSERT(pConverter->resampler.config.channels == pConverter->channelConverter.channelsOut); + MA_ASSERT(pConverter->resampler.config.channels < pConverter->channelConverter.channelsIn); + + frameCountIn = 0; + if (pFrameCountIn != NULL) { + frameCountIn = *pFrameCountIn; + } + + frameCountOut = 0; + if (pFrameCountOut != NULL) { + frameCountOut = *pFrameCountOut; } - dst_s16 = (ma_int16*)dst; - src_f32 = (const float*)src; + framesProcessedIn = 0; + framesProcessedOut = 0; - ditherMin = 0; - ditherMax = 0; - if (ditherMode != ma_dither_mode_none) { - ditherMin = 1.0f / -32768; - ditherMax = 1.0f / 32767; - } + tempBufferInCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsIn); + tempBufferMidCap = sizeof(pTempBufferIn) / ma_get_bytes_per_frame(pConverter->channelConverter.format, pConverter->channelConverter.channelsOut); + tempBufferOutCap = sizeof(pTempBufferOut) / ma_get_bytes_per_frame(pConverter->resampler.config.format, pConverter->resampler.config.channels); - i = 0; + while (framesProcessedOut < frameCountOut) { + ma_uint64 frameCountInThisIteration; + ma_uint64 frameCountOutThisIteration; + const void* pRunningFramesIn = NULL; + void* pRunningFramesOut = NULL; + const void* pChannelsBufferIn; + void* pResampleBufferOut; - /* SSE2. SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. 
*/ - count8 = count >> 3; - for (i8 = 0; i8 < count8; i8 += 1) { - __m128 d0; - __m128 d1; - __m128 x0; - __m128 x1; + if (pFramesIn != NULL) { + pRunningFramesIn = ma_offset_ptr(pFramesIn, framesProcessedIn * ma_get_bytes_per_frame(pConverter->config.formatIn, pConverter->config.channelsIn)); + } + if (pFramesOut != NULL) { + pRunningFramesOut = ma_offset_ptr(pFramesOut, framesProcessedOut * ma_get_bytes_per_frame(pConverter->config.formatOut, pConverter->config.channelsOut)); + } - if (ditherMode == ma_dither_mode_none) { - d0 = _mm_set1_ps(0); - d1 = _mm_set1_ps(0); - } else if (ditherMode == ma_dither_mode_rectangle) { - d0 = _mm_set_ps( - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax) - ); - d1 = _mm_set_ps( - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax) - ); + /* Run input data through the channel converter and output it to the temporary buffer. */ + frameCountInThisIteration = (frameCountIn - framesProcessedIn); + + if (pConverter->hasPreFormatConversion) { + if (frameCountInThisIteration > tempBufferInCap) { + frameCountInThisIteration = tempBufferInCap; + } + + if (pRunningFramesIn != NULL) { + ma_convert_pcm_frames_format(pTempBufferIn, pConverter->channelConverter.format, pRunningFramesIn, pConverter->config.formatIn, frameCountInThisIteration, pConverter->config.channelsIn, pConverter->config.ditherMode); + pChannelsBufferIn = pTempBufferIn; + } else { + pChannelsBufferIn = NULL; + } } else { - d0 = _mm_set_ps( - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax) - ); - d1 = _mm_set_ps( - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax) - ); + pChannelsBufferIn = pRunningFramesIn; } - x0 = *((__m128*)(src_f32 + i) + 0); - x1 = *((__m128*)(src_f32 + i) + 1); + /* + We can't convert more frames than will fit in the output buffer. We shouldn't actually need to do this check because the channel count is always reduced + in this case which means we should always have capacity, but I'm leaving it here just for safety for future maintenance. + */ + if (frameCountInThisIteration > tempBufferMidCap) { + frameCountInThisIteration = tempBufferMidCap; + } - x0 = _mm_add_ps(x0, d0); - x1 = _mm_add_ps(x1, d1); + /* + Make sure we don't read any more input frames than we need to fill the output frame count. If we do this we will end up in a situation where we lose some + input samples and will end up glitching. 
+ */ + frameCountOutThisIteration = (frameCountOut - framesProcessedOut); + if (frameCountOutThisIteration > tempBufferMidCap) { + frameCountOutThisIteration = tempBufferMidCap; + } - x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f)); - x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f)); + if (pConverter->hasPostFormatConversion) { + ma_uint64 requiredInputFrameCount; - _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1))); - - i += 8; - } + if (frameCountOutThisIteration > tempBufferOutCap) { + frameCountOutThisIteration = tempBufferOutCap; + } + requiredInputFrameCount = ma_resampler_get_required_input_frame_count(&pConverter->resampler, frameCountOutThisIteration); + if (frameCountInThisIteration > requiredInputFrameCount) { + frameCountInThisIteration = requiredInputFrameCount; + } + } - /* Leftover. */ - for (; i < count; i += 1) { - float x = src_f32[i]; - x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); - x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - x = x * 32767.0f; /* -1..1 to -32767..32767 */ + result = ma_channel_converter_process_pcm_frames(&pConverter->channelConverter, pTempBufferMid, pChannelsBufferIn, frameCountInThisIteration); + if (result != MA_SUCCESS) { + return result; + } - dst_s16[i] = (ma_int16)x; - } -} -#endif -#if defined(MA_SUPPORT_AVX2) -void ma_pcm_f32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_uint64 i; - ma_uint64 i16; - ma_uint64 count16; - ma_int16* dst_s16; - const float* src_f32; - float ditherMin; - float ditherMax; - /* Both the input and output buffers need to be aligned to 32 bytes. */ - if ((((ma_uintptr)dst & 31) != 0) || (((ma_uintptr)src & 31) != 0)) { - ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); - return; + /* At this point we have converted the channels to the output channel count which we now need to resample. */ + if (pConverter->hasPostFormatConversion) { + pResampleBufferOut = pTempBufferOut; + } else { + pResampleBufferOut = pRunningFramesOut; + } + + result = ma_resampler_process_pcm_frames(&pConverter->resampler, pTempBufferMid, &frameCountInThisIteration, pResampleBufferOut, &frameCountOutThisIteration); + if (result != MA_SUCCESS) { + return result; + } + + /* Finally we can do the post format conversion. */ + if (pConverter->hasPostFormatConversion) { + if (pRunningFramesOut != NULL) { + ma_convert_pcm_frames_format(pRunningFramesOut, pConverter->config.formatOut, pResampleBufferOut, pConverter->resampler.config.format, frameCountOutThisIteration, pConverter->config.channelsOut, pConverter->config.ditherMode); + } + } + + framesProcessedIn += frameCountInThisIteration; + framesProcessedOut += frameCountOutThisIteration; + + MA_ASSERT(framesProcessedIn <= frameCountIn); + MA_ASSERT(framesProcessedOut <= frameCountOut); + + if (frameCountOutThisIteration == 0) { + break; /* Consumed all of our input data. 
*/ + } } - dst_s16 = (ma_int16*)dst; - src_f32 = (const float*)src; + if (pFrameCountIn != NULL) { + *pFrameCountIn = framesProcessedIn; + } + if (pFrameCountOut != NULL) { + *pFrameCountOut = framesProcessedOut; + } + + return MA_SUCCESS; +} - ditherMin = 0; - ditherMax = 0; - if (ditherMode != ma_dither_mode_none) { - ditherMin = 1.0f / -32768; - ditherMax = 1.0f / 32767; +ma_result ma_data_converter_process_pcm_frames(ma_data_converter* pConverter, const void* pFramesIn, ma_uint64* pFrameCountIn, void* pFramesOut, ma_uint64* pFrameCountOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; } - i = 0; + if (pConverter->isPassthrough) { + return ma_data_converter_process_pcm_frames__passthrough(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } - /* AVX2. AVX2 allows us to output 16 s16's at a time which means our loop is unrolled 16 times. */ - count16 = count >> 4; - for (i16 = 0; i16 < count16; i16 += 1) { - __m256 d0; - __m256 d1; - __m256 x0; - __m256 x1; - __m256i i0; - __m256i i1; - __m256i p0; - __m256i p1; - __m256i r; + /* + Here is where the real work is done. Getting here means we're not using a passthrough and we need to move the data through each of the relevant stages. The order + of our stages depends on the input and output channel count. If the input channels is less than the output channels we want to do sample rate conversion first so + that it has less work (resampling is the most expensive part of format conversion). + */ + if (pConverter->config.channelsIn < pConverter->config.channelsOut) { + /* Do resampling first, if necessary. */ + MA_ASSERT(pConverter->hasChannelConverter == MA_TRUE); - if (ditherMode == ma_dither_mode_none) { - d0 = _mm256_set1_ps(0); - d1 = _mm256_set1_ps(0); - } else if (ditherMode == ma_dither_mode_rectangle) { - d0 = _mm256_set_ps( - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax) - ); - d1 = _mm256_set_ps( - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax), - ma_dither_f32_rectangle(ditherMin, ditherMax) - ); + if (pConverter->hasResampler) { + /* Resampling first. 
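As a concrete illustration: converting mono 22050 Hz input to stereo 48000 Hz takes this branch, so the resampler only ever touches one channel and the cheaper channel up-mix runs on the already-resampled stream; a stereo-to-mono conversion takes the channels-first branch below for the same reason, down-mixing first so that only a single channel is resampled.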
*/ + return ma_data_converter_process_pcm_frames__resampling_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); } else { - d0 = _mm256_set_ps( - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax) - ); - d1 = _mm256_set_ps( - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax), - ma_dither_f32_triangle(ditherMin, ditherMax) - ); + /* Resampling not required. */ + return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); } + } else { + /* Do channel conversion first, if necessary. */ + if (pConverter->hasChannelConverter) { + if (pConverter->hasResampler) { + /* Channel routing first. */ + return ma_data_converter_process_pcm_frames__channels_first(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* Resampling not required. */ + return ma_data_converter_process_pcm_frames__channels_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + } else { + /* Channel routing not required. */ + if (pConverter->hasResampler) { + /* Resampling only. */ + return ma_data_converter_process_pcm_frames__resample_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } else { + /* No channel routing nor resampling required. Just format conversion. */ + return ma_data_converter_process_pcm_frames__format_only(pConverter, pFramesIn, pFrameCountIn, pFramesOut, pFrameCountOut); + } + } + } +} - x0 = *((__m256*)(src_f32 + i) + 0); - x1 = *((__m256*)(src_f32 + i) + 1); - - x0 = _mm256_add_ps(x0, d0); - x1 = _mm256_add_ps(x1, d1); - - x0 = _mm256_mul_ps(x0, _mm256_set1_ps(32767.0f)); - x1 = _mm256_mul_ps(x1, _mm256_set1_ps(32767.0f)); +ma_result ma_data_converter_set_rate(ma_data_converter* pConverter, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; + } - /* Computing the final result is a little more complicated for AVX2 than SSE2. */ - i0 = _mm256_cvttps_epi32(x0); - i1 = _mm256_cvttps_epi32(x1); - p0 = _mm256_permute2x128_si256(i0, i1, 0 | 32); - p1 = _mm256_permute2x128_si256(i0, i1, 1 | 48); - r = _mm256_packs_epi32(p0, p1); + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } - _mm256_stream_si256(((__m256i*)(dst_s16 + i)), r); + return ma_resampler_set_rate(&pConverter->resampler, sampleRateIn, sampleRateOut); +} - i += 16; +ma_result ma_data_converter_set_rate_ratio(ma_data_converter* pConverter, float ratioInOut) +{ + if (pConverter == NULL) { + return MA_INVALID_ARGS; } + if (pConverter->hasResampler == MA_FALSE) { + return MA_INVALID_OPERATION; /* Dynamic resampling not enabled. */ + } - /* Leftover. */ - for (; i < count; i += 1) { - float x = src_f32[i]; - x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); - x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ - x = x * 32767.0f; /* -1..1 to -32767..32767 */ + return ma_resampler_set_rate_ratio(&pConverter->resampler, ratioInOut); +} - dst_s16[i] = (ma_int16)x; +ma_uint64 ma_data_converter_get_required_input_frame_count(ma_data_converter* pConverter, ma_uint64 outputFrameCount) +{ + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_required_input_frame_count(&pConverter->resampler, outputFrameCount); + } else { + return outputFrameCount; /* 1:1 */ } } -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_f32_to_s16__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) + +ma_uint64 ma_data_converter_get_expected_output_frame_count(ma_data_converter* pConverter, ma_uint64 inputFrameCount) { - /* TODO: Convert this from AVX to AVX-512. */ - ma_pcm_f32_to_s16__avx2(dst, src, count, ditherMode); + if (pConverter == NULL) { + return 0; + } + + if (pConverter->hasResampler) { + return ma_resampler_get_expected_output_frame_count(&pConverter->resampler, inputFrameCount); + } else { + return inputFrameCount; /* 1:1 */ + } } -#endif -#if defined(MA_SUPPORT_NEON) -void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) + +ma_uint64 ma_data_converter_get_input_latency(ma_data_converter* pConverter) { - ma_uint64 i; - ma_uint64 i8; - ma_uint64 count8; - ma_int16* dst_s16; - const float* src_f32; - float ditherMin; - float ditherMax; + if (pConverter == NULL) { + return 0; + } - /* Both the input and output buffers need to be aligned to 16 bytes. */ - if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { - ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); - return; + if (pConverter->hasResampler) { + return ma_resampler_get_input_latency(&pConverter->resampler); } - dst_s16 = (ma_int16*)dst; - src_f32 = (const float*)src; + return 0; /* No latency without a resampler. */ +} - ditherMin = 0; - ditherMax = 0; - if (ditherMode != ma_dither_mode_none) { - ditherMin = 1.0f / -32768; - ditherMax = 1.0f / 32767; +ma_uint64 ma_data_converter_get_output_latency(ma_data_converter* pConverter) +{ + if (pConverter == NULL) { + return 0; } - i = 0; + if (pConverter->hasResampler) { + return ma_resampler_get_output_latency(&pConverter->resampler); + } - /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ - count8 = count >> 3; - for (i8 = 0; i8 < count8; i8 += 1) { - float32x4_t d0; - float32x4_t d1; - float32x4_t x0; - float32x4_t x1; - int32x4_t i0; - int32x4_t i1; + return 0; /* No latency without a resampler. 
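Taken together, the functions above form the whole public surface of the data converter: process frames in chunks, optionally retune the rate, and query required/expected frame counts and latency. A sketch of caller-side usage, assuming the public 0.10 helpers ma_data_converter_config_init(), ma_data_converter_init() and ma_data_converter_uninit() with the argument order shown; the buffer sizes and counts are arbitrary for the example:

    /* Sketch only: mono s16 @ 44100 Hz in, stereo f32 @ 48000 Hz out, one chunk at a time. */
    ma_data_converter_config config = ma_data_converter_config_init(
        ma_format_s16, ma_format_f32, 1, 2, 44100, 48000);

    ma_data_converter converter;
    if (ma_data_converter_init(&config, &converter) == MA_SUCCESS) {
        ma_int16  inputFrames [1024];        /* filled by the application */
        float     outputFrames[1024 * 2];    /* room for 1024 stereo frames */
        ma_uint64 frameCountIn  = 1024;      /* in: frames available;  out: frames consumed */
        ma_uint64 frameCountOut = 1024;      /* in: output capacity;   out: frames produced */

        ma_data_converter_process_pcm_frames(&converter, inputFrames, &frameCountIn, outputFrames, &frameCountOut);

        /* With resampling the consumed and produced counts will generally differ;
           ma_data_converter_get_required_input_frame_count() can be used up front
           to size the input for a desired output count. */

        ma_data_converter_uninit(&converter);
    }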
*/ +} - if (ditherMode == ma_dither_mode_none) { - d0 = vmovq_n_f32(0); - d1 = vmovq_n_f32(0); - } else if (ditherMode == ma_dither_mode_rectangle) { - float d0v[4]; - d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d0 = vld1q_f32(d0v); - float d1v[4]; - d1v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d1v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d1v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d1v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); - d1 = vld1q_f32(d1v); - } else { - float d0v[4]; - d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); - d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); - d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); - d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); - d0 = vld1q_f32(d0v); - float d1v[4]; - d1v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); - d1v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); - d1v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); - d1v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); - d1 = vld1q_f32(d1v); - } +/************************************************************************************************************************************************************** - x0 = *((float32x4_t*)(src_f32 + i) + 0); - x1 = *((float32x4_t*)(src_f32 + i) + 1); +Format Conversion - x0 = vaddq_f32(x0, d0); - x1 = vaddq_f32(x1, d1); +**************************************************************************************************************************************************************/ - x0 = vmulq_n_f32(x0, 32767.0f); - x1 = vmulq_n_f32(x1, 32767.0f); +/* u8 */ +void ma_pcm_u8_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + ma_copy_memory_64(dst, src, count * sizeof(ma_uint8)); +} - i0 = vcvtq_s32_f32(x0); - i1 = vcvtq_s32_f32(x1); - *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1)); - i += 8; +static MA_INLINE void ma_pcm_u8_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_u8[i]; + x = x - 128; + x = x << 8; + dst_s16[i] = x; } + (void)ditherMode; +} - /* Leftover. */ - for (; i < count; i += 1) { - float x = src_f32[i]; - x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); - x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ - x = x * 32767.0f; /* -1..1 to -32767..32767 */ +static MA_INLINE void ma_pcm_u8_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); +} - dst_s16[i] = (ma_int16)x; - } +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_u8_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); } #endif -void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +void ma_pcm_u8_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { #ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode); + ma_pcm_u8_to_s16__reference(dst, src, count, ditherMode); #else - ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_s16__optimized(dst, src, count, ditherMode); + } #endif } -void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { ma_uint8* dst_s24 = (ma_uint8*)dst; - const float* src_f32 = (const float*)src; + const ma_uint8* src_u8 = (const ma_uint8*)src; ma_uint64 i; for (i = 0; i < count; i += 1) { - ma_int32 r; - float x = src_f32[i]; - x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - -#if 0 - /* The accurate way. */ - x = x + 1; /* -1..1 to 0..2 */ - x = x * 8388607.5f; /* 0..2 to 0..16777215 */ - x = x - 8388608.0f; /* 0..16777215 to -8388608..8388607 */ -#else - /* The fast way. */ - x = x * 8388607.0f; /* -1..1 to -8388607..8388607 */ -#endif + ma_int16 x = src_u8[i]; + x = x - 128; - r = (ma_int32)x; - dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >> 0); - dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >> 8); - dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16); + dst_s24[i*3+0] = 0; + dst_s24[i*3+1] = 0; + dst_s24[i*3+2] = (ma_uint8)((ma_int8)x); } - (void)ditherMode; /* No dithering for f32 -> s24. 
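Since unsigned 8-bit PCM is offset binary (0..255 with 128 as silence), the u8 conversions above all reduce to "subtract 128, then shift". A small check using ma_pcm_u8_to_s16() as defined above, with arbitrary input values:

    ma_uint8 src[3] = { 0, 128, 255 };
    ma_int16 dst[3];

    ma_pcm_u8_to_s16(dst, src, 3, ma_dither_mode_none);
    /* dst is now { -32768, 0, 32512 }: (x - 128) << 8, so positive full scale
       becomes 127 << 8 = 32512 rather than 32767. The s24 variant above places
       the same offset value in the top byte of each 3-byte sample, and the s32
       variant that follows shifts by 24. */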
*/ + (void)ditherMode; } -void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); + ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); } #if defined(MA_SUPPORT_SSE2) -void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); } #endif #if defined(MA_SUPPORT_AVX2) -void ma_pcm_f32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); } #endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_f32_to_s24__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_u8_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); +} +#endif + +void ma_pcm_u8_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_u8_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void ma_pcm_u8_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_u8[i]; + x = x - 128; + x = x << 24; + dst_s32[i] = x; + } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_u8_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_u8_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_u8_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s24__avx2(dst, src, count, ditherMode); + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); } #endif #if defined(MA_SUPPORT_NEON) -void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s24__optimized(dst, src, count, 
ditherMode); + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); } #endif -void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +void ma_pcm_u8_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { #ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); + ma_pcm_u8_to_s32__reference(dst, src, count, ditherMode); #else - ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_s32__optimized(dst, src, count, ditherMode); + } #endif } -void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_int32* dst_s32 = (ma_int32*)dst; - const float* src_f32 = (const float*)src; + float* dst_f32 = (float*)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; - ma_uint32 i; + ma_uint64 i; for (i = 0; i < count; i += 1) { - double x = src_f32[i]; - x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - -#if 0 - /* The accurate way. */ - x = x + 1; /* -1..1 to 0..2 */ - x = x * 2147483647.5; /* 0..2 to 0..4294967295 */ - x = x - 2147483648.0; /* 0...4294967295 to -2147483648..2147483647 */ -#else - /* The fast way. */ - x = x * 2147483647.0; /* -1..1 to -2147483647..2147483647 */ -#endif + float x = (float)src_u8[i]; + x = x * 0.00784313725490196078f; /* 0..255 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ - dst_s32[i] = (ma_int32)x; + dst_f32[i] = x; } - (void)ditherMode; /* No dithering for f32 -> s32. 
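The constant in ma_pcm_u8_to_f32__reference above is simply 2/255: the byte is scaled from 0..255 onto 0..2 and then shifted down to -1..1. Worked through by hand for the three interesting inputs (illustrative arithmetic, not part of the patch):

    x = u8 * (2/255) - 1
      u8 = 0    ->  0.0     - 1 = -1.0
      u8 = 255  ->  2.0     - 1 = +1.0
      u8 = 128  ->  256/255 - 1 = +1/255  (about +0.0039, so the u8 midpoint does not land exactly on 0.0f)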
*/ + (void)ditherMode; } -void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); + ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode); } #if defined(MA_SUPPORT_SSE2) -void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); } #endif #if defined(MA_SUPPORT_AVX2) -void ma_pcm_f32_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); -} -#endif -#if defined(MA_SUPPORT_AVX512) -void ma_pcm_f32_to_s32__avx512(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s32__avx2(dst, src, count, ditherMode); + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); } #endif #if defined(MA_SUPPORT_NEON) -void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +static MA_INLINE void ma_pcm_u8_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); } #endif -void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +void ma_pcm_u8_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { #ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); + ma_pcm_u8_to_f32__reference(dst, src, count, ditherMode); #else - ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_u8_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_u8_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_u8_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_u8_to_f32__optimized(dst, src, count, ditherMode); + } #endif } -void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) -{ - (void)ditherMode; - - ma_copy_memory_64(dst, src, count * sizeof(float)); -} - - -void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_interleave_u8__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) { - float* dst_f32 = (float*)dst; - const float** src_f32 = (const float**)src; + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8** src_u8 = (const ma_uint8**)src; ma_uint64 iFrame; for (iFrame = 0; iFrame < frameCount; iFrame += 1) { ma_uint32 iChannel; for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame]; + dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; } } } - -void 
ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +#else +static MA_INLINE void ma_pcm_interleave_u8__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) { - ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8** src_u8 = (const ma_uint8**)src; + + if (channels == 1) { + ma_copy_memory_64(dst, src[0], frameCount * sizeof(ma_uint8)); + } else if (channels == 2) { + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + dst_u8[iFrame*2 + 0] = src_u8[0][iFrame]; + dst_u8[iFrame*2 + 1] = src_u8[1][iFrame]; + } + } else { + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_u8[iFrame*channels + iChannel] = src_u8[iChannel][iFrame]; + } + } + } } +#endif -void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +void ma_pcm_interleave_u8(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) { #ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); + ma_pcm_interleave_u8__reference(dst, src, frameCount, channels); #else - ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels); + ma_pcm_interleave_u8__optimized(dst, src, frameCount, channels); #endif } -void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +static MA_INLINE void ma_pcm_deinterleave_u8__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) { - float** dst_f32 = (float**)dst; - const float* src_f32 = (const float*)src; + ma_uint8** dst_u8 = (ma_uint8**)dst; + const ma_uint8* src_u8 = (const ma_uint8*)src; ma_uint64 iFrame; for (iFrame = 0; iFrame < frameCount; iFrame += 1) { ma_uint32 iChannel; for (iChannel = 0; iChannel < channels; iChannel += 1) { - dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel]; + dst_u8[iChannel][iFrame] = src_u8[iFrame*channels + iChannel]; } } } -void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +static MA_INLINE void ma_pcm_deinterleave_u8__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) { - ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); + ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); } -void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +void ma_pcm_deinterleave_u8(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) { #ifdef MA_USE_REFERENCE_CONVERSION_APIS - ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); + ma_pcm_deinterleave_u8__reference(dst, src, frameCount, channels); #else - ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels); + ma_pcm_deinterleave_u8__optimized(dst, src, frameCount, channels); #endif } -void ma_format_converter_init_callbacks__default(ma_format_converter* pConverter) +/* s16 */ +static MA_INLINE void ma_pcm_s16_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_assert(pConverter != NULL); - - switch (pConverter->config.formatIn) - { - case ma_format_u8: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_u8_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) 
{ - pConverter->onConvertPCM = ma_pcm_u8_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_u8_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_u8_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_u8_to_f32; - } - } break; - - case ma_format_s16: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s16_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s16_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s16_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s16_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s16_to_f32; - } - } break; + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; - case ma_format_s24: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s24_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s24_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s24_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s24_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s24_to_f32; - } - } break; + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_s16[i]; + x = x >> 8; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int16 x = src_s16[i]; - case ma_format_s32: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s32_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s32_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s32_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s32_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s32_to_f32; + /* Dither. Don't overflow. 
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x80, 0x7F); + if ((x + dither) <= 0x7FFF) { + x = (ma_int16)(x + dither); + } else { + x = 0x7FFF; } - } break; - case ma_format_f32: - default: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_f32_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_f32_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_f32_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_f32_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_f32_to_f32; - } - } break; + x = x >> 8; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } } } -#if defined(MA_SUPPORT_SSE2) -void ma_format_converter_init_callbacks__sse2(ma_format_converter* pConverter) +static MA_INLINE void ma_pcm_s16_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_assert(pConverter != NULL); - - switch (pConverter->config.formatIn) - { - case ma_format_u8: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_u8_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_u8_to_s16__sse2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_u8_to_s24__sse2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_u8_to_s32__sse2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_u8_to_f32__sse2; - } - } break; - - case ma_format_s16: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s16_to_u8__sse2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s16_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s16_to_s24__sse2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s16_to_s32__sse2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s16_to_f32__sse2; - } - } break; - - case ma_format_s24: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s24_to_u8__sse2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s24_to_s16__sse2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s24_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s24_to_s32__sse2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s24_to_f32__sse2; - } - } break; - - case ma_format_s32: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s32_to_u8__sse2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s32_to_s16__sse2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s32_to_s24__sse2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s32_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s32_to_f32__sse2; - } - } 
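The clamp guarded by the "Dither. Don't overflow." comment above matters because the dither is added in 32-bit space before the value is narrowed back to 16 bits. A worked example of the failure it prevents, with sample values chosen purely for illustration:

    ma_int16 x      = 32700;                 /* close to positive full scale */
    ma_int32 dither = 0x7F;                  /* worst-case positive dither from ma_dither_s32 */
    ma_int32 y      = x + dither;            /* 32827, which no longer fits in ma_int16 */

    if (y > 0x7FFF) {
        y = 0x7FFF;                          /* clamp before narrowing, as the reference does */
    }

    ma_uint8 u = (ma_uint8)((y >> 8) + 128); /* 255; without the clamp the sum would wrap
                                                negative in 16 bits and come out near 0. */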
break; + ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); +} - case ma_format_f32: - default: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_f32_to_u8__sse2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_f32_to_s16__sse2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_f32_to_s24__sse2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_f32_to_s32__sse2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_f32_to_f32; - } - } break; - } +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); } #endif - #if defined(MA_SUPPORT_AVX2) -void ma_format_converter_init_callbacks__avx2(ma_format_converter* pConverter) +static MA_INLINE void ma_pcm_s16_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_assert(pConverter != NULL); + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); +} +#endif - switch (pConverter->config.formatIn) - { - case ma_format_u8: +void ma_pcm_s16_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_u8__neon(dst, src, count, ditherMode); + } else + #endif { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_u8_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_u8_to_s16__avx2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_u8_to_s24__avx2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_u8_to_s32__avx2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_u8_to_f32__avx2; - } - } break; + ma_pcm_s16_to_u8__optimized(dst, src, count, ditherMode); + } +#endif +} - case ma_format_s16: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s16_to_u8__avx2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s16_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s16_to_s24__avx2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s16_to_s32__avx2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s16_to_f32__avx2; - } - } break; - case ma_format_s24: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s24_to_u8__avx2; - } else if 
(pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s24_to_s16__avx2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s24_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s24_to_s32__avx2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s24_to_f32__avx2; - } - } break; +void ma_pcm_s16_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; + ma_copy_memory_64(dst, src, count * sizeof(ma_int16)); +} - case ma_format_s32: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s32_to_u8__avx2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s32_to_s16__avx2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s32_to_s24__avx2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s32_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s32_to_f32__avx2; - } - } break; - case ma_format_f32: - default: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_f32_to_u8__avx2; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_f32_to_s16__avx2; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_f32_to_s24__avx2; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_f32_to_s32__avx2; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_f32_to_f32; - } - } break; +static MA_INLINE void ma_pcm_s16_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; + + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s24[i*3+0] = 0; + dst_s24[i*3+1] = (ma_uint8)(src_s16[i] & 0xFF); + dst_s24[i*3+2] = (ma_uint8)(src_s16[i] >> 8); } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); +} + +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s16_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); } #endif - -#if defined(MA_SUPPORT_AVX512) -void ma_format_converter_init_callbacks__avx512(ma_format_converter* pConverter) +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s16_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_assert(pConverter != NULL); - - switch (pConverter->config.formatIn) - { - case ma_format_u8: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_u8_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_u8_to_s16__avx512; - } else if (pConverter->config.formatOut == 
ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_u8_to_s24__avx512; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_u8_to_s32__avx512; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_u8_to_f32__avx512; - } - } break; + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); +} +#endif - case ma_format_s16: +void ma_pcm_s16_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_s24__neon(dst, src, count, ditherMode); + } else + #endif { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s16_to_u8__avx512; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s16_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s16_to_s24__avx512; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s16_to_s32__avx512; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s16_to_f32__avx512; - } - } break; + ma_pcm_s16_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} - case ma_format_s24: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s24_to_u8__avx512; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s24_to_s16__avx512; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s24_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s24_to_s32__avx512; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s24_to_f32__avx512; - } - } break; - case ma_format_s32: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s32_to_u8__avx512; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s32_to_s16__avx512; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s32_to_s24__avx512; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s32_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s32_to_f32__avx512; - } - } break; +static MA_INLINE void ma_pcm_s16_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; - case ma_format_f32: - default: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_f32_to_u8__avx512; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_f32_to_s16__avx512; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_f32_to_s24__avx512; - } else if (pConverter->config.formatOut 
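For reference, the s24 produced by ma_pcm_s16_to_s24__reference above is 3-byte little-endian with the 16-bit sample in the upper two bytes, i.e. the 24-bit value is s16 << 8; the s16 -> s32 conversion that follows shifts left by 16 in the same spirit. Worked through by hand for one positive and one negative sample (illustrative only):

    s16 = 0x1234 (+4660)  ->  s24 bytes { 0x00, 0x34, 0x12 } = 0x123400 = +1192960 (= 4660 << 8)
    s16 = 0xFFFF (-1)     ->  s24 bytes { 0x00, 0xFF, 0xFF } = -256              (= -1 << 8)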
== ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_f32_to_s32__avx512; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_f32_to_f32; - } - } break; + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = src_s16[i] << 16; } + + (void)ditherMode; +} + +static MA_INLINE void ma_pcm_s16_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); } -#endif +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s16_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif #if defined(MA_SUPPORT_NEON) -void ma_format_converter_init_callbacks__neon(ma_format_converter* pConverter) +static MA_INLINE void ma_pcm_s16_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_assert(pConverter != NULL); + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); +} +#endif - switch (pConverter->config.formatIn) - { - case ma_format_u8: +void ma_pcm_s16_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_s32__neon(dst, src, count, ditherMode); + } else + #endif { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_u8_to_u8; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_u8_to_s16__neon; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_u8_to_s24__neon; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_u8_to_s32__neon; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_u8_to_f32__neon; - } - } break; + ma_pcm_s16_to_s32__optimized(dst, src, count, ditherMode); + } +#endif +} - case ma_format_s16: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s16_to_u8__neon; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s16_to_s16; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s16_to_s24__neon; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s16_to_s32__neon; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s16_to_f32__neon; - } - } break; - case ma_format_s24: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s24_to_u8__neon; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s24_to_s16__neon; - } else if (pConverter->config.formatOut == ma_format_s24) { - 
pConverter->onConvertPCM = ma_pcm_s24_to_s24; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s24_to_s32__neon; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s24_to_f32__neon; - } - } break; +static MA_INLINE void ma_pcm_s16_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int16* src_s16 = (const ma_int16*)src; - case ma_format_s32: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_s32_to_u8__neon; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_s32_to_s16__neon; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_s32_to_s24__neon; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_s32_to_s32; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_s32_to_f32__neon; - } - } break; + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)src_s16[i]; - case ma_format_f32: - default: - { - if (pConverter->config.formatOut == ma_format_u8) { - pConverter->onConvertPCM = ma_pcm_f32_to_u8__neon; - } else if (pConverter->config.formatOut == ma_format_s16) { - pConverter->onConvertPCM = ma_pcm_f32_to_s16__neon; - } else if (pConverter->config.formatOut == ma_format_s24) { - pConverter->onConvertPCM = ma_pcm_f32_to_s24__neon; - } else if (pConverter->config.formatOut == ma_format_s32) { - pConverter->onConvertPCM = ma_pcm_f32_to_s32__neon; - } else if (pConverter->config.formatOut == ma_format_f32) { - pConverter->onConvertPCM = ma_pcm_f32_to_f32; - } - } break; - } -} +#if 0 + /* The accurate way. */ + x = x + 32768.0f; /* -32768..32767 to 0..65535 */ + x = x * 0.00003051804379339284f; /* 0..65535 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. 
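The disabled branch above is the symmetric mapping: it shifts into the unsigned range (x + 32768), scales by 2/65535 (0.00003051804379339284) and subtracts 1, so both -32768 and +32767 land exactly on -1 and +1. The fast path below is a single multiply by 1/32768 (0.000030517578125): -32768 still maps to exactly -1.0f, while +32767 maps to 32767/32768 = 0.999969482421875, just short of +1.0f. Purely as an illustration (this snippet is not part of the original source), exercising the public wrapper on the extremes:

    ma_int16 in[2] = { -32768, 32767 };
    float out[2];
    ma_pcm_s16_to_f32(out, in, 2, ma_dither_mode_none);
    // out[0] is -1.0f, out[1] is 0.999969482421875f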
*/ + x = x * 0.000030517578125f; /* -32768..32767 to -1..0.999969482421875 */ #endif -ma_result ma_format_converter_init(const ma_format_converter_config* pConfig, ma_format_converter* pConverter) -{ - if (pConverter == NULL) { - return MA_INVALID_ARGS; - } - ma_zero_object(pConverter); - - if (pConfig == NULL) { - return MA_INVALID_ARGS; + dst_f32[i] = x; } - pConverter->config = *pConfig; + (void)ditherMode; +} - /* SIMD */ - pConverter->useSSE2 = ma_has_sse2() && !pConfig->noSSE2; - pConverter->useAVX2 = ma_has_avx2() && !pConfig->noAVX2; - pConverter->useAVX512 = ma_has_avx512f() && !pConfig->noAVX512; - pConverter->useNEON = ma_has_neon() && !pConfig->noNEON; +static MA_INLINE void ma_pcm_s16_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +} -#if defined(MA_SUPPORT_AVX512) - if (pConverter->useAVX512) { - ma_format_converter_init_callbacks__avx512(pConverter); - } else +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s16_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} #endif #if defined(MA_SUPPORT_AVX2) - if (pConverter->useAVX2) { - ma_format_converter_init_callbacks__avx2(pConverter); - } else -#endif -#if defined(MA_SUPPORT_SSE2) - if (pConverter->useSSE2) { - ma_format_converter_init_callbacks__sse2(pConverter); - } else +static MA_INLINE void ma_pcm_s16_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} #endif #if defined(MA_SUPPORT_NEON) - if (pConverter->useNEON) { - ma_format_converter_init_callbacks__neon(pConverter); - } else +static MA_INLINE void ma_pcm_s16_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); +} #endif - { - ma_format_converter_init_callbacks__default(pConverter); - } - switch (pConfig->formatOut) - { - case ma_format_u8: - { - pConverter->onInterleavePCM = ma_pcm_interleave_u8; - pConverter->onDeinterleavePCM = ma_pcm_deinterleave_u8; - } break; - case ma_format_s16: - { - pConverter->onInterleavePCM = ma_pcm_interleave_s16; - pConverter->onDeinterleavePCM = ma_pcm_deinterleave_s16; - } break; - case ma_format_s24: - { - pConverter->onInterleavePCM = ma_pcm_interleave_s24; - pConverter->onDeinterleavePCM = ma_pcm_deinterleave_s24; - } break; - case ma_format_s32: - { - pConverter->onInterleavePCM = ma_pcm_interleave_s32; - pConverter->onDeinterleavePCM = ma_pcm_deinterleave_s32; - } break; - case ma_format_f32: - default: +void ma_pcm_s16_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s16_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s16_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s16_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s16_to_f32__neon(dst, src, count, ditherMode); + } else + #endif { - pConverter->onInterleavePCM = ma_pcm_interleave_f32; - pConverter->onDeinterleavePCM = ma_pcm_deinterleave_f32; - } break; + ma_pcm_s16_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} + + +static MA_INLINE void 
ma_pcm_interleave_s16__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int16** src_s16 = (const ma_int16**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iFrame*channels + iChannel] = src_s16[iChannel][iFrame]; + } } +} - return MA_SUCCESS; +static MA_INLINE void ma_pcm_interleave_s16__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); } -ma_uint64 ma_format_converter_read(ma_format_converter* pConverter, ma_uint64 frameCount, void* pFramesOut, void* pUserData) +void ma_pcm_interleave_s16(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) { - ma_uint64 totalFramesRead; - ma_uint32 sampleSizeIn; - ma_uint32 sampleSizeOut; - ma_uint32 frameSizeOut; - ma_uint8* pNextFramesOut; +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s16__optimized(dst, src, frameCount, channels); +#endif +} - if (pConverter == NULL || pFramesOut == NULL) { - return 0; - } - totalFramesRead = 0; - sampleSizeIn = ma_get_bytes_per_sample(pConverter->config.formatIn); - sampleSizeOut = ma_get_bytes_per_sample(pConverter->config.formatOut); - /*frameSizeIn = sampleSizeIn * pConverter->config.channels;*/ - frameSizeOut = sampleSizeOut * pConverter->config.channels; - pNextFramesOut = (ma_uint8*)pFramesOut; - - if (pConverter->config.onRead != NULL) { - /* Input data is interleaved. */ - if (pConverter->config.formatIn == pConverter->config.formatOut) { - /* Pass through. */ - while (totalFramesRead < frameCount) { - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > 0xFFFFFFFF) { - framesToReadRightNow = 0xFFFFFFFF; - } +static MA_INLINE void ma_pcm_deinterleave_s16__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int16** dst_s16 = (ma_int16**)dst; + const ma_int16* src_s16 = (const ma_int16*)src; - framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, pNextFramesOut, pUserData); - if (framesJustRead == 0) { - break; - } + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s16[iChannel][iFrame] = src_s16[iFrame*channels + iChannel]; + } + } +} - totalFramesRead += framesJustRead; - pNextFramesOut += framesJustRead * frameSizeOut; +static MA_INLINE void ma_pcm_deinterleave_s16__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +} - if (framesJustRead < framesToReadRightNow) { - break; - } - } - } else { - /* Conversion required. 
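When a conversion is needed the interleaved path reads into a fixed, SIMD-aligned scratch buffer and works chunk by chunk: maxFramesToReadAtATime below is sizeof(temp) divided by the input sample size and the channel count, so a single onRead callback is never asked for more frames than the scratch buffer can hold before onConvertPCM writes the converted frames to the output.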
*/ - ma_uint32 maxFramesToReadAtATime; +void ma_pcm_deinterleave_s16(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s16__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s16__optimized(dst, src, frameCount, channels); +#endif +} - MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 temp[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128]; - ma_assert(sizeof(temp) <= 0xFFFFFFFF); - maxFramesToReadAtATime = sizeof(temp) / sampleSizeIn / pConverter->config.channels; +/* s24 */ +static MA_INLINE void ma_pcm_s24_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; - while (totalFramesRead < frameCount) { - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > maxFramesToReadAtATime) { - framesToReadRightNow = maxFramesToReadAtATime; - } + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int8 x = (ma_int8)src_s24[i*3 + 2] + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); - framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, temp, pUserData); - if (framesJustRead == 0) { - break; - } + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } +} - pConverter->onConvertPCM(pNextFramesOut, temp, framesJustRead*pConverter->config.channels, pConverter->config.ditherMode); +static MA_INLINE void ma_pcm_s24_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +} - totalFramesRead += framesJustRead; - pNextFramesOut += framesJustRead * frameSizeOut; +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); +} +#endif - if (framesJustRead < framesToReadRightNow) { - break; - } - } +void ma_pcm_s24_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s24_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + 
ma_pcm_s24_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_u8__optimized(dst, src, count, ditherMode); } - } else { - /* Input data is deinterleaved. If a conversion is required we need to do an intermediary step. */ - void* ppTempSamplesOfOutFormat[MA_MAX_CHANNELS]; - size_t splitBufferSizeOut; - ma_uint32 maxFramesToReadAtATime; - - MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfOutFormat[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128]; - ma_assert(sizeof(tempSamplesOfOutFormat) <= 0xFFFFFFFF); +#endif +} - ma_split_buffer(tempSamplesOfOutFormat, sizeof(tempSamplesOfOutFormat), pConverter->config.channels, MA_SIMD_ALIGNMENT, (void**)&ppTempSamplesOfOutFormat, &splitBufferSizeOut); - maxFramesToReadAtATime = (ma_uint32)(splitBufferSizeOut / sampleSizeIn); +static MA_INLINE void ma_pcm_s24_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; - while (totalFramesRead < frameCount) { - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > maxFramesToReadAtATime) { - framesToReadRightNow = maxFramesToReadAtATime; - } + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint16 dst_lo = ((ma_uint16)src_s24[i*3 + 1]); + ma_uint16 dst_hi = ((ma_uint16)src_s24[i*3 + 2]) << 8; + dst_s16[i] = (ma_int16)dst_lo | dst_hi; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); - if (pConverter->config.formatIn == pConverter->config.formatOut) { - /* Only interleaving. */ - framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, ppTempSamplesOfOutFormat, pUserData); - if (framesJustRead == 0) { - break; - } + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; } else { - /* Interleaving + Conversion. Convert first, then interleave. 
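The callback first fills a per-channel, in-format scratch buffer (carved up with ma_split_buffer), each channel is then converted independently into the out-format scratch, and finally onInterleavePCM packs the converted channels into the caller's output buffer.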
*/ - void* ppTempSamplesOfInFormat[MA_MAX_CHANNELS]; - size_t splitBufferSizeIn; - ma_uint32 iChannel; - - MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfInFormat[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128]; - - ma_split_buffer(tempSamplesOfInFormat, sizeof(tempSamplesOfInFormat), pConverter->config.channels, MA_SIMD_ALIGNMENT, (void**)&ppTempSamplesOfInFormat, &splitBufferSizeIn); - - if (framesToReadRightNow > (splitBufferSizeIn / sampleSizeIn)) { - framesToReadRightNow = (splitBufferSizeIn / sampleSizeIn); - } - - framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, ppTempSamplesOfInFormat, pUserData); - if (framesJustRead == 0) { - break; - } - - for (iChannel = 0; iChannel < pConverter->config.channels; iChannel += 1) { - pConverter->onConvertPCM(ppTempSamplesOfOutFormat[iChannel], ppTempSamplesOfInFormat[iChannel], framesJustRead, pConverter->config.ditherMode); - } + x = 0x7FFFFFFF; } - pConverter->onInterleavePCM(pNextFramesOut, (const void**)ppTempSamplesOfOutFormat, framesJustRead, pConverter->config.channels); - - totalFramesRead += framesJustRead; - pNextFramesOut += framesJustRead * frameSizeOut; - - if (framesJustRead < framesToReadRightNow) { - break; - } + x = x >> 16; + dst_s16[i] = (ma_int16)x; } } - - return totalFramesRead; } -ma_uint64 ma_format_converter_read_deinterleaved(ma_format_converter* pConverter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData) +static MA_INLINE void ma_pcm_s24_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_uint64 totalFramesRead; - ma_uint32 sampleSizeIn; - ma_uint32 sampleSizeOut; - ma_uint8* ppNextSamplesOut[MA_MAX_CHANNELS]; - - if (pConverter == NULL || ppSamplesOut == NULL) { - return 0; - } + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +} - totalFramesRead = 0; - sampleSizeIn = ma_get_bytes_per_sample(pConverter->config.formatIn); - sampleSizeOut = ma_get_bytes_per_sample(pConverter->config.formatOut); +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); +} +#endif - ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(void*) * pConverter->config.channels); +void ma_pcm_s24_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s24_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_s16__optimized(dst, src, count, ditherMode); + } +#endif +} - if (pConverter->config.onRead != NULL) { - /* Input data is 
interleaved. */ - ma_uint32 maxFramesToReadAtATime; - MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfOutFormat[MA_MAX_CHANNELS * MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128]; - ma_assert(sizeof(tempSamplesOfOutFormat) <= 0xFFFFFFFF); +void ma_pcm_s24_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; - maxFramesToReadAtATime = sizeof(tempSamplesOfOutFormat) / sampleSizeIn / pConverter->config.channels; + ma_copy_memory_64(dst, src, count * 3); +} - while (totalFramesRead < frameCount) { - ma_uint32 iChannel; - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > maxFramesToReadAtATime) { - framesToReadRightNow = maxFramesToReadAtATime; - } - if (pConverter->config.formatIn == pConverter->config.formatOut) { - /* Only de-interleaving. */ - framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, tempSamplesOfOutFormat, pUserData); - if (framesJustRead == 0) { - break; - } - } else { - /* De-interleaving + Conversion. Convert first, then de-interleave. */ - MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 tempSamplesOfInFormat[sizeof(tempSamplesOfOutFormat)]; +static MA_INLINE void ma_pcm_s24_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; - framesJustRead = (ma_uint32)pConverter->config.onRead(pConverter, (ma_uint32)framesToReadRightNow, tempSamplesOfInFormat, pUserData); - if (framesJustRead == 0) { - break; - } + ma_uint64 i; + for (i = 0; i < count; i += 1) { + dst_s32[i] = (ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24); + } - pConverter->onConvertPCM(tempSamplesOfOutFormat, tempSamplesOfInFormat, framesJustRead * pConverter->config.channels, pConverter->config.ditherMode); - } + (void)ditherMode; +} - pConverter->onDeinterleavePCM((void**)ppNextSamplesOut, tempSamplesOfOutFormat, framesJustRead, pConverter->config.channels); +static MA_INLINE void ma_pcm_s24_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +} - totalFramesRead += framesJustRead; - for (iChannel = 0; iChannel < pConverter->config.channels; ++iChannel) { - ppNextSamplesOut[iChannel] += framesJustRead * sampleSizeOut; - } +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); +} +#endif - if (framesJustRead < framesToReadRightNow) { - break; - } +void ma_pcm_s24_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + 
ma_pcm_s24_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_s32__optimized(dst, src, count, ditherMode); } - } else { - /* Input data is deinterleaved. */ - if (pConverter->config.formatIn == pConverter->config.formatOut) { - /* Pass through. */ - while (totalFramesRead < frameCount) { - ma_uint32 iChannel; - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > 0xFFFFFFFF) { - framesToReadRightNow = 0xFFFFFFFF; - } +#endif +} - framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, (void**)ppNextSamplesOut, pUserData); - if (framesJustRead == 0) { - break; - } - totalFramesRead += framesJustRead; - for (iChannel = 0; iChannel < pConverter->config.channels; ++iChannel) { - ppNextSamplesOut[iChannel] += framesJustRead * sampleSizeOut; - } +static MA_INLINE void ma_pcm_s24_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_uint8* src_s24 = (const ma_uint8*)src; - if (framesJustRead < framesToReadRightNow) { - break; - } - } - } else { - /* Conversion required. */ - void* ppTemp[MA_MAX_CHANNELS]; - size_t splitBufferSize; - ma_uint32 maxFramesToReadAtATime; + ma_uint64 i; + for (i = 0; i < count; i += 1) { + float x = (float)(((ma_int32)(((ma_uint32)(src_s24[i*3+0]) << 8) | ((ma_uint32)(src_s24[i*3+1]) << 16) | ((ma_uint32)(src_s24[i*3+2])) << 24)) >> 8); - MA_ALIGN(MA_SIMD_ALIGNMENT) ma_uint8 temp[MA_MAX_CHANNELS][MA_MAX_PCM_SAMPLE_SIZE_IN_BYTES * 128]; - ma_assert(sizeof(temp) <= 0xFFFFFFFF); +#if 0 + /* The accurate way. */ + x = x + 8388608.0f; /* -8388608..8388607 to 0..16777215 */ + x = x * 0.00000011920929665621f; /* 0..16777215 to 0..2 */ + x = x - 1; /* 0..2 to -1..1 */ +#else + /* The fast way. 
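As with the s16 conversion, the disabled branch shifts into the unsigned range (x + 8388608), scales by 2/16777215 (0.00000011920929665621) and subtracts 1 so that both extremes hit -1 and +1 exactly; the fast path is a single multiply by 1/8388608 (0.00000011920928955078125), which maps -8388608 to exactly -1.0f and +8388607 to just under +1.0f.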
*/ + x = x * 0.00000011920928955078125f; /* -8388608..8388607 to -1..0.999969482421875 */ +#endif - ma_split_buffer(temp, sizeof(temp), pConverter->config.channels, MA_SIMD_ALIGNMENT, (void**)&ppTemp, &splitBufferSize); + dst_f32[i] = x; + } - maxFramesToReadAtATime = (ma_uint32)(splitBufferSize / sampleSizeIn); + (void)ditherMode; +} - while (totalFramesRead < frameCount) { - ma_uint32 iChannel; - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > maxFramesToReadAtATime) { - framesToReadRightNow = maxFramesToReadAtATime; - } +static MA_INLINE void ma_pcm_s24_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +} - framesJustRead = (ma_uint32)pConverter->config.onReadDeinterleaved(pConverter, (ma_uint32)framesToReadRightNow, ppTemp, pUserData); - if (framesJustRead == 0) { - break; - } +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s24_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s24_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s24_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); +} +#endif - for (iChannel = 0; iChannel < pConverter->config.channels; iChannel += 1) { - pConverter->onConvertPCM(ppNextSamplesOut[iChannel], ppTemp[iChannel], framesJustRead, pConverter->config.ditherMode); - ppNextSamplesOut[iChannel] += framesJustRead * sampleSizeOut; - } +void ma_pcm_s24_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s24_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s24_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s24_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s24_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s24_to_f32__optimized(dst, src, count, ditherMode); + } +#endif +} - totalFramesRead += framesJustRead; - if (framesJustRead < framesToReadRightNow) { - break; - } - } +static MA_INLINE void ma_pcm_interleave_s24__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_uint8* dst8 = (ma_uint8*)dst; + const ma_uint8** src8 = (const ma_uint8**)src; + + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iFrame*3*channels + iChannel*3 + 0] = src8[iChannel][iFrame*3 + 0]; + dst8[iFrame*3*channels + iChannel*3 + 1] = src8[iChannel][iFrame*3 + 1]; + dst8[iFrame*3*channels + iChannel*3 + 2] = src8[iChannel][iFrame*3 + 2]; } } +} - return totalFramesRead; +static MA_INLINE void ma_pcm_interleave_s24__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s24__reference(dst, src, 
frameCount, channels); +} + +void ma_pcm_interleave_s24(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s24__optimized(dst, src, frameCount, channels); +#endif } -ma_format_converter_config ma_format_converter_config_init_new() +static MA_INLINE void ma_pcm_deinterleave_s24__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) { - ma_format_converter_config config; - ma_zero_object(&config); + ma_uint8** dst8 = (ma_uint8**)dst; + const ma_uint8* src8 = (const ma_uint8*)src; - return config; + ma_uint32 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst8[iChannel][iFrame*3 + 0] = src8[iFrame*3*channels + iChannel*3 + 0]; + dst8[iChannel][iFrame*3 + 1] = src8[iFrame*3*channels + iChannel*3 + 1]; + dst8[iChannel][iFrame*3 + 2] = src8[iFrame*3*channels + iChannel*3 + 2]; + } + } } -ma_format_converter_config ma_format_converter_config_init(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_proc onRead, void* pUserData) +static MA_INLINE void ma_pcm_deinterleave_s24__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) { - ma_format_converter_config config = ma_format_converter_config_init_new(); - config.formatIn = formatIn; - config.formatOut = formatOut; - config.channels = channels; - config.onRead = onRead; - config.onReadDeinterleaved = NULL; - config.pUserData = pUserData; - - return config; + ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); } -ma_format_converter_config ma_format_converter_config_init_deinterleaved(ma_format formatIn, ma_format formatOut, ma_uint32 channels, ma_format_converter_read_deinterleaved_proc onReadDeinterleaved, void* pUserData) +void ma_pcm_deinterleave_s24(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) { - ma_format_converter_config config = ma_format_converter_config_init(formatIn, formatOut, channels, NULL, pUserData); - config.onReadDeinterleaved = onReadDeinterleaved; - - return config; +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s24__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s24__optimized(dst, src, frameCount, channels); +#endif } -/************************************************************************************************************************************************************** - -Channel Routing - -**************************************************************************************************************************************************************/ - -/* --X = Left, +X = Right --Y = Bottom, +Y = Top --Z = Front, +Z = Back -*/ -typedef struct -{ - float x; - float y; - float z; -} ma_vec3; - -static MA_INLINE ma_vec3 ma_vec3f(float x, float y, float z) +/* s32 */ +static MA_INLINE void ma_pcm_s32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_vec3 r; - r.x = x; - r.y = y; - r.z = z; + ma_uint8* dst_u8 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; - return r; -} + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; 
-static MA_INLINE ma_vec3 ma_vec3_add(ma_vec3 a, ma_vec3 b) -{ - return ma_vec3f( - a.x + b.x, - a.y + b.y, - a.z + b.z - ); + /* Dither. Don't overflow. */ + ma_int32 dither = ma_dither_s32(ditherMode, -0x800000, 0x7FFFFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 24; + x = x + 128; + dst_u8[i] = (ma_uint8)x; + } + } } -static MA_INLINE ma_vec3 ma_vec3_sub(ma_vec3 a, ma_vec3 b) +static MA_INLINE void ma_pcm_s32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return ma_vec3f( - a.x - b.x, - a.y - b.y, - a.z - b.z - ); + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); } -static MA_INLINE ma_vec3 ma_vec3_mul(ma_vec3 a, ma_vec3 b) +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return ma_vec3f( - a.x * b.x, - a.y * b.y, - a.z * b.z - ); + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); } - -static MA_INLINE ma_vec3 ma_vec3_div(ma_vec3 a, ma_vec3 b) +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return ma_vec3f( - a.x / b.x, - a.y / b.y, - a.z / b.z - ); + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); } - -static MA_INLINE float ma_vec3_dot(ma_vec3 a, ma_vec3 b) +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return a.x*b.x + a.y*b.y + a.z*b.z; + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); } +#endif -static MA_INLINE float ma_vec3_length2(ma_vec3 a) +void ma_pcm_s32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return ma_vec3_dot(a, a); +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_u8__optimized(dst, src, count, ditherMode); + } +#endif } -static MA_INLINE float ma_vec3_length(ma_vec3 a) -{ - return (float)sqrt(ma_vec3_length2(a)); -} -static MA_INLINE ma_vec3 ma_vec3_normalize(ma_vec3 a) +static MA_INLINE void ma_pcm_s32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - float len = 1 / ma_vec3_length(a); + ma_int16* dst_s16 = (ma_int16*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; - ma_vec3 r; - r.x = a.x * len; - r.y = a.y * len; - r.z = a.z * len; + if (ditherMode == ma_dither_mode_none) { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } else { + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 x = src_s32[i]; - return r; + /* Dither. Don't overflow. 
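x can already be close to 0x7FFFFFFF here, so adding a positive dither value in 32-bit arithmetic could wrap; the sum is therefore evaluated as ma_int64 and clamped to 0x7FFFFFFF before being shifted down to 8 bits and biased by 128.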
*/ + ma_int32 dither = ma_dither_s32(ditherMode, -0x8000, 0x7FFF); + if ((ma_int64)x + dither <= 0x7FFFFFFF) { + x = x + dither; + } else { + x = 0x7FFFFFFF; + } + + x = x >> 16; + dst_s16[i] = (ma_int16)x; + } + } } -static MA_INLINE float ma_vec3_distance(ma_vec3 a, ma_vec3 b) +static MA_INLINE void ma_pcm_s32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return ma_vec3_length(ma_vec3_sub(a, b)); + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); } - -#define MA_PLANE_LEFT 0 -#define MA_PLANE_RIGHT 1 -#define MA_PLANE_FRONT 2 -#define MA_PLANE_BACK 3 -#define MA_PLANE_BOTTOM 4 -#define MA_PLANE_TOP 5 - -float g_maChannelPlaneRatios[MA_CHANNEL_POSITION_COUNT][6] = { - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_NONE */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_MONO */ - { 0.5f, 0.0f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT */ - { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT */ - { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_CENTER */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_LFE */ - { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_LEFT */ - { 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_RIGHT */ - { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_LEFT_CENTER */ - { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_FRONT_RIGHT_CENTER */ - { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, /* MA_CHANNEL_BACK_CENTER */ - { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_LEFT */ - { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_SIDE_RIGHT */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f}, /* MA_CHANNEL_TOP_CENTER */ - { 0.33f, 0.0f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_LEFT */ - { 0.0f, 0.0f, 0.5f, 0.0f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_FRONT_CENTER */ - { 0.0f, 0.33f, 0.33f, 0.0f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_FRONT_RIGHT */ - { 0.33f, 0.0f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_LEFT */ - { 0.0f, 0.0f, 0.0f, 0.5f, 0.0f, 0.5f}, /* MA_CHANNEL_TOP_BACK_CENTER */ - { 0.0f, 0.33f, 0.0f, 0.33f, 0.0f, 0.34f}, /* MA_CHANNEL_TOP_BACK_RIGHT */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_0 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_1 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_2 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_3 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_4 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_5 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_6 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_7 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_8 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_9 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_10 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_11 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_12 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_13 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_14 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_15 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_16 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_17 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_18 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_19 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_20 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* 
MA_CHANNEL_AUX_21 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_22 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_23 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_24 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_25 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_26 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_27 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_28 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_29 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_30 */ - { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, /* MA_CHANNEL_AUX_31 */ -}; - -float ma_calculate_channel_position_planar_weight(ma_channel channelPositionA, ma_channel channelPositionB) +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - /* - Imagine the following simplified example: You have a single input speaker which is the front/left speaker which you want to convert to - the following output configuration: - - - front/left - - side/left - - back/left - - The front/left output is easy - it the same speaker position so it receives the full contribution of the front/left input. The amount - of contribution to apply to the side/left and back/left speakers, however, is a bit more complicated. - - Imagine the front/left speaker as emitting audio from two planes - the front plane and the left plane. You can think of the front/left - speaker emitting half of it's total volume from the front, and the other half from the left. Since part of it's volume is being emitted - from the left side, and the side/left and back/left channels also emit audio from the left plane, one would expect that they would - receive some amount of contribution from front/left speaker. The amount of contribution depends on how many planes are shared between - the two speakers. Note that in the examples below I've added a top/front/left speaker as an example just to show how the math works - across 3 spatial dimensions. - - The first thing to do is figure out how each speaker's volume is spread over each of plane: - - front/left: 2 planes (front and left) = 1/2 = half it's total volume on each plane - - side/left: 1 plane (left only) = 1/1 = entire volume from left plane - - back/left: 2 planes (back and left) = 1/2 = half it's total volume on each plane - - top/front/left: 3 planes (top, front and left) = 1/3 = one third it's total volume on each plane - - The amount of volume each channel contributes to each of it's planes is what controls how much it is willing to given and take to other - channels on the same plane. The volume that is willing to the given by one channel is multiplied by the volume that is willing to be - taken by the other to produce the final contribution. 
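As a worked example using the plane ratios in the table above: front/left spreads its volume as 0.5 on the left plane and 0.5 on the front plane, so its contribution to side/left (1.0 on the left plane) is 0.5 * 1.0 = 0.5, to back/left (0.5 left, 0.5 back) it is 0.5 * 0.5 = 0.25, and to top/front/left (0.33 left, 0.33 front, 0.34 top) it is 0.5*0.33 + 0.5*0.33 = 0.33.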
- */ - - /* Contribution = Sum(Volume to Give * Volume to Take) */ - float contribution = - g_maChannelPlaneRatios[channelPositionA][0] * g_maChannelPlaneRatios[channelPositionB][0] + - g_maChannelPlaneRatios[channelPositionA][1] * g_maChannelPlaneRatios[channelPositionB][1] + - g_maChannelPlaneRatios[channelPositionA][2] * g_maChannelPlaneRatios[channelPositionB][2] + - g_maChannelPlaneRatios[channelPositionA][3] * g_maChannelPlaneRatios[channelPositionB][3] + - g_maChannelPlaneRatios[channelPositionA][4] * g_maChannelPlaneRatios[channelPositionB][4] + - g_maChannelPlaneRatios[channelPositionA][5] * g_maChannelPlaneRatios[channelPositionB][5]; - - return contribution; + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); } - -float ma_channel_router__calculate_input_channel_planar_weight(const ma_channel_router* pRouter, ma_channel channelPositionIn, ma_channel channelPositionOut) +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_assert(pRouter != NULL); - (void)pRouter; - - return ma_calculate_channel_position_planar_weight(channelPositionIn, channelPositionOut); + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); } - -ma_bool32 ma_channel_router__is_spatial_channel_position(const ma_channel_router* pRouter, ma_channel channelPosition) +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - int i; - - ma_assert(pRouter != NULL); - (void)pRouter; - - if (channelPosition == MA_CHANNEL_NONE || channelPosition == MA_CHANNEL_MONO || channelPosition == MA_CHANNEL_LFE) { - return MA_FALSE; - } + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); +} +#endif - for (i = 0; i < 6; ++i) { - if (g_maChannelPlaneRatios[channelPosition][i] != 0) { - return MA_TRUE; +void ma_pcm_s32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_s16__optimized(dst, src, count, ditherMode); } - } - - return MA_FALSE; +#endif } -ma_result ma_channel_router_init(const ma_channel_router_config* pConfig, ma_channel_router* pRouter) + +static MA_INLINE void ma_pcm_s32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_uint32 iChannelIn; - ma_uint32 iChannelOut; + ma_uint8* dst_s24 = (ma_uint8*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; - if (pRouter == NULL) { - return MA_INVALID_ARGS; + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_uint32 x = (ma_uint32)src_s32[i]; + dst_s24[i*3+0] = (ma_uint8)((x & 0x0000FF00) >> 8); + dst_s24[i*3+1] = (ma_uint8)((x & 0x00FF0000) >> 16); + dst_s24[i*3+2] = (ma_uint8)((x & 0xFF000000) >> 24); } - ma_zero_object(pRouter); + (void)ditherMode; /* No dithering for s32 -> s24. 
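This simply drops the least significant byte of each 32-bit sample and stores the remaining three bytes little-endian; for x = 0x12345678 the bytes written are 0x56, 0x34 and 0x12, i.e. the 24-bit value 0x123456.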
*/ +} - if (pConfig == NULL) { - return MA_INVALID_ARGS; - } - if (pConfig->onReadDeinterleaved == NULL) { - return MA_INVALID_ARGS; - } +static MA_INLINE void ma_pcm_s32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +} - if (!ma_channel_map_valid(pConfig->channelsIn, pConfig->channelMapIn)) { - return MA_INVALID_ARGS; /* Invalid input channel map. */ - } - if (!ma_channel_map_valid(pConfig->channelsOut, pConfig->channelMapOut)) { - return MA_INVALID_ARGS; /* Invalid output channel map. */ - } +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); +} +#endif - pRouter->config = *pConfig; +void ma_pcm_s32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif +} - /* SIMD */ - pRouter->useSSE2 = ma_has_sse2() && !pConfig->noSSE2; - pRouter->useAVX2 = ma_has_avx2() && !pConfig->noAVX2; - pRouter->useAVX512 = ma_has_avx512f() && !pConfig->noAVX512; - pRouter->useNEON = ma_has_neon() && !pConfig->noNEON; - /* If the input and output channels and channel maps are the same we should use a passthrough. */ - if (pRouter->config.channelsIn == pRouter->config.channelsOut) { - if (ma_channel_map_equal(pRouter->config.channelsIn, pRouter->config.channelMapIn, pRouter->config.channelMapOut)) { - pRouter->isPassthrough = MA_TRUE; - } - if (ma_channel_map_blank(pRouter->config.channelsIn, pRouter->config.channelMapIn) || ma_channel_map_blank(pRouter->config.channelsOut, pRouter->config.channelMapOut)) { - pRouter->isPassthrough = MA_TRUE; - } - } +void ma_pcm_s32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; - /* - We can use a simple case for expanding the mono channel. This will when expanding a mono input into any output so long - as no LFE is present in the output. - */ - if (!pRouter->isPassthrough) { - if (pRouter->config.channelsIn == 1 && pRouter->config.channelMapIn[0] == MA_CHANNEL_MONO) { - /* Optimal case if no LFE is in the output channel map. 
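In that case the mono input can be copied verbatim into every output channel with no per-channel weighting; the flag is cleared again just below if an LFE position is found in the output map.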
*/ - pRouter->isSimpleMonoExpansion = MA_TRUE; - if (ma_channel_map_contains_channel_position(pRouter->config.channelsOut, pRouter->config.channelMapOut, MA_CHANNEL_LFE)) { - pRouter->isSimpleMonoExpansion = MA_FALSE; - } - } - } + ma_copy_memory_64(dst, src, count * sizeof(ma_int32)); +} - /* Another optimized case is stereo to mono. */ - if (!pRouter->isPassthrough) { - if (pRouter->config.channelsOut == 1 && pRouter->config.channelMapOut[0] == MA_CHANNEL_MONO && pRouter->config.channelsIn == 2) { - /* Optimal case if no LFE is in the input channel map. */ - pRouter->isStereoToMono = MA_TRUE; - if (ma_channel_map_contains_channel_position(pRouter->config.channelsIn, pRouter->config.channelMapIn, MA_CHANNEL_LFE)) { - pRouter->isStereoToMono = MA_FALSE; - } - } - } - /* - Here is where we do a bit of pre-processing to know how each channel should be combined to make up the output. Rules: - - 1) If it's a passthrough, do nothing - it's just a simple memcpy(). - 2) If the channel counts are the same and every channel position in the input map is present in the output map, use a - simple shuffle. An example might be different 5.1 channel layouts. - 3) Otherwise channels are blended based on spatial locality. - */ - if (!pRouter->isPassthrough) { - if (pRouter->config.channelsIn == pRouter->config.channelsOut) { - ma_bool32 areAllChannelPositionsPresent = MA_TRUE; - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_bool32 isInputChannelPositionInOutput = MA_FALSE; - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - if (pRouter->config.channelMapIn[iChannelIn] == pRouter->config.channelMapOut[iChannelOut]) { - isInputChannelPositionInOutput = MA_TRUE; - break; - } - } +static MA_INLINE void ma_pcm_s32_to_f32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + float* dst_f32 = (float*)dst; + const ma_int32* src_s32 = (const ma_int32*)src; - if (!isInputChannelPositionInOutput) { - areAllChannelPositionsPresent = MA_FALSE; - break; - } - } + ma_uint64 i; + for (i = 0; i < count; i += 1) { + double x = src_s32[i]; - if (areAllChannelPositionsPresent) { - pRouter->isSimpleShuffle = MA_TRUE; +#if 0 + x = x + 2147483648.0; + x = x * 0.0000000004656612873077392578125; + x = x - 1; +#else + x = x / 2147483648.0; +#endif - /* - All the router will be doing is rearranging channels which means all we need to do is use a shuffling table which is just - a mapping between the index of the input channel to the index of the output channel. - */ - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - if (pRouter->config.channelMapIn[iChannelIn] == pRouter->config.channelMapOut[iChannelOut]) { - pRouter->shuffleTable[iChannelIn] = (ma_uint8)iChannelOut; - break; - } - } - } - } - } + dst_f32[i] = (float)x; } + (void)ditherMode; /* No dithering for s32 -> f32. */ +} - /* - Here is where weights are calculated. Note that we calculate the weights at all times, even when using a passthrough and simple - shuffling. We use different algorithms for calculating weights depending on our mixing mode. - - In simple mode we don't do any blending (except for converting between mono, which is done in a later step). Instead we just - map 1:1 matching channels. In this mode, if no channels in the input channel map correspond to anything in the output channel - map, nothing will be heard! 
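For example, a stereo FL/FR input routed in simple mode to an output map that only lists side and back positions would come out silent, because no input position has a 1:1 match in the output.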
- */ - - /* In all cases we need to make sure all channels that are present in both channel maps have a 1:1 mapping. */ - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; +static MA_INLINE void ma_pcm_s32_to_f32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +} - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_s32_to_f32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_s32_to_f32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_s32_to_f32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); +} +#endif - if (channelPosIn == channelPosOut) { - pRouter->config.weights[iChannelIn][iChannelOut] = 1; - } +void ma_pcm_s32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_s32_to_f32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_s32_to_f32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_s32_to_f32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_s32_to_f32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_s32_to_f32__optimized(dst, src, count, ditherMode); } - } +#endif +} - /* - The mono channel is accumulated on all other channels, except LFE. Make sure in this loop we exclude output mono channels since - they were handled in the pass above. - */ - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; - if (channelPosIn == MA_CHANNEL_MONO) { - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; +static MA_INLINE void ma_pcm_interleave_s32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const ma_int32** src_s32 = (const ma_int32**)src; - if (channelPosOut != MA_CHANNEL_NONE && channelPosOut != MA_CHANNEL_MONO && channelPosOut != MA_CHANNEL_LFE) { - pRouter->config.weights[iChannelIn][iChannelOut] = 1; - } - } + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iFrame*channels + iChannel] = src_s32[iChannel][iFrame]; } } +} - /* The output mono channel is the average of all non-none, non-mono and non-lfe input channels. 
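For a standard 5.1 input (FL, FR, FC, LFE, BL, BR) this gives len = 5, so each of the five spatial channels feeds the mono output with monoWeight = 1/5 = 0.2.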
*/ - { - ma_uint32 len = 0; - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; - - if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) { - len += 1; - } - } +static MA_INLINE void ma_pcm_interleave_s32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +} - if (len > 0) { - float monoWeight = 1.0f / len; +void ma_pcm_interleave_s32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_s32__optimized(dst, src, frameCount, channels); +#endif +} - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; - if (channelPosOut == MA_CHANNEL_MONO) { - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; +static MA_INLINE void ma_pcm_deinterleave_s32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_int32** dst_s32 = (ma_int32**)dst; + const ma_int32* src_s32 = (const ma_int32*)src; - if (channelPosIn != MA_CHANNEL_NONE && channelPosIn != MA_CHANNEL_MONO && channelPosIn != MA_CHANNEL_LFE) { - pRouter->config.weights[iChannelIn][iChannelOut] += monoWeight; - } - } - } - } + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_s32[iChannel][iFrame] = src_s32[iFrame*channels + iChannel]; } } +} +static MA_INLINE void ma_pcm_deinterleave_s32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +} - /* Input and output channels that are not present on the other side need to be blended in based on spatial locality. */ - switch (pRouter->config.mixingMode) - { - case ma_channel_mix_mode_rectangular: - { - /* Unmapped input channels. */ - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; - - if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) { - if (!ma_channel_map_contains_channel_position(pRouter->config.channelsOut, pRouter->config.channelMapOut, channelPosIn)) { - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; - - if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) { - float weight = 0; - if (pRouter->config.mixingMode == ma_channel_mix_mode_planar_blend) { - weight = ma_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut); - } - - /* Only apply the weight if we haven't already got some contribution from the respective channels. */ - if (pRouter->config.weights[iChannelIn][iChannelOut] == 0) { - pRouter->config.weights[iChannelIn][iChannelOut] = weight; - } - } - } - } - } - } - - /* Unmapped output channels. 
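This mirrors the block above for unmapped inputs: output positions with no 1:1 match in the input map pull a planar-blend weight from every spatial input channel, but only where no contribution has been assigned already.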
*/ - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; +void ma_pcm_deinterleave_s32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_s32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_s32__optimized(dst, src, frameCount, channels); +#endif +} - if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) { - if (!ma_channel_map_contains_channel_position(pRouter->config.channelsIn, pRouter->config.channelMapIn, channelPosOut)) { - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - ma_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; - if (ma_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) { - float weight = 0; - if (pRouter->config.mixingMode == ma_channel_mix_mode_planar_blend) { - weight = ma_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut); - } +/* f32 */ +static MA_INLINE void ma_pcm_f32_to_u8__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint64 i; - /* Only apply the weight if we haven't already got some contribution from the respective channels. */ - if (pRouter->config.weights[iChannelIn][iChannelOut] == 0) { - pRouter->config.weights[iChannelIn][iChannelOut] = weight; - } - } - } - } - } - } - } break; + ma_uint8* dst_u8 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; - case ma_channel_mix_mode_custom_weights: - case ma_channel_mix_mode_simple: - default: - { - /* Fallthrough. */ - } break; + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -128; + ditherMax = 1.0f / 127; } - return MA_SUCCESS; + for (i = 0; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 127.5f; /* 0..2 to 0..255 */ + + dst_u8[i] = (ma_uint8)x; + } } -static MA_INLINE ma_bool32 ma_channel_router__can_use_sse2(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn) +static MA_INLINE void ma_pcm_f32_to_u8__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return pRouter->useSSE2 && (((ma_uintptr)pSamplesOut & 15) == 0) && (((ma_uintptr)pSamplesIn & 15) == 0); + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); } -static MA_INLINE ma_bool32 ma_channel_router__can_use_avx2(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn) +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_u8__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return pRouter->useAVX2 && (((ma_uintptr)pSamplesOut & 31) == 0) && (((ma_uintptr)pSamplesIn & 31) == 0); + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); } - -static MA_INLINE ma_bool32 ma_channel_router__can_use_avx512(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn) +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_u8__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return pRouter->useAVX512 && (((ma_uintptr)pSamplesOut & 63) == 0) && (((ma_uintptr)pSamplesIn & 63) == 0); + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); } - -static MA_INLINE ma_bool32 ma_channel_router__can_use_neon(ma_channel_router* pRouter, const float* pSamplesOut, const float* pSamplesIn) +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_u8__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return pRouter->useNEON && (((ma_uintptr)pSamplesOut & 15) == 0) && (((ma_uintptr)pSamplesIn & 15) == 0); + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); } +#endif -void ma_channel_router__do_routing(ma_channel_router* pRouter, ma_uint64 frameCount, float** ppSamplesOut, const float** ppSamplesIn) +void ma_pcm_f32_to_u8(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_uint32 iChannelIn; - ma_uint32 iChannelOut; - - ma_assert(pRouter != NULL); - ma_assert(pRouter->isPassthrough == MA_FALSE); - - if (pRouter->isSimpleShuffle) { - /* A shuffle is just a re-arrangement of channels and does not require any arithmetic. */ - ma_assert(pRouter->config.channelsIn == pRouter->config.channelsOut); - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - iChannelOut = pRouter->shuffleTable[iChannelIn]; - ma_copy_memory_64(ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn], frameCount * sizeof(float)); - } - } else if (pRouter->isSimpleMonoExpansion) { - /* Simple case for expanding from mono. */ - if (pRouter->config.channelsOut == 2) { - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; ++iFrame) { - ppSamplesOut[0][iFrame] = ppSamplesIn[0][iFrame]; - ppSamplesOut[1][iFrame] = ppSamplesIn[0][iFrame]; - } - } else { - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_uint64 iFrame; - for (iFrame = 0; iFrame < frameCount; ++iFrame) { - ppSamplesOut[iChannelOut][iFrame] = ppSamplesIn[0][iFrame]; - } - } - } - } else if (pRouter->isStereoToMono) { - ma_uint64 iFrame; - - /* Simple case for going from stereo to mono. 
*/ - ma_assert(pRouter->config.channelsIn == 2); - ma_assert(pRouter->config.channelsOut == 1); - - for (iFrame = 0; iFrame < frameCount; ++iFrame) { - ppSamplesOut[0][iFrame] = (ppSamplesIn[0][iFrame] + ppSamplesIn[1][iFrame]) * 0.5f; - } - } else { - /* This is the more complicated case. Each of the output channels is accumulated with 0 or more input channels. */ - - /* Clear. */ - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_zero_memory_64(ppSamplesOut[iChannelOut], frameCount * sizeof(float)); +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_u8__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_u8__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_u8__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_u8__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_u8__optimized(dst, src, count, ditherMode); } - - /* Accumulate. */ - for (iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - for (iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - ma_uint64 iFrame = 0; -#if defined(MA_SUPPORT_NEON) - if (ma_channel_router__can_use_neon(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - float32x4_t weight = vmovq_n_f32(pRouter->config.weights[iChannelIn][iChannelOut]); - ma_uint64 frameCount4 = frameCount/4; - ma_uint64 iFrame4; - - for (iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) { - float32x4_t* pO = (float32x4_t*)ppSamplesOut[iChannelOut] + iFrame4; - float32x4_t* pI = (float32x4_t*)ppSamplesIn [iChannelIn ] + iFrame4; - *pO = vaddq_f32(*pO, vmulq_f32(*pI, weight)); - } - - iFrame += frameCount4*4; - } - else -#endif -#if defined(MA_SUPPORT_AVX512) - if (ma_channel_router__can_use_avx512(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - __m512 weight = _mm512_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]); - ma_uint64 frameCount16 = frameCount/16; - ma_uint64 iFrame16; - - for (iFrame16 = 0; iFrame16 < frameCount16; iFrame16 += 1) { - __m512* pO = (__m512*)ppSamplesOut[iChannelOut] + iFrame16; - __m512* pI = (__m512*)ppSamplesIn [iChannelIn ] + iFrame16; - *pO = _mm512_add_ps(*pO, _mm512_mul_ps(*pI, weight)); - } - - iFrame += frameCount16*16; - } - else -#endif -#if defined(MA_SUPPORT_AVX2) - if (ma_channel_router__can_use_avx2(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - __m256 weight = _mm256_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]); - ma_uint64 frameCount8 = frameCount/8; - ma_uint64 iFrame8; - - for (iFrame8 = 0; iFrame8 < frameCount8; iFrame8 += 1) { - __m256* pO = (__m256*)ppSamplesOut[iChannelOut] + iFrame8; - __m256* pI = (__m256*)ppSamplesIn [iChannelIn ] + iFrame8; - *pO = _mm256_add_ps(*pO, _mm256_mul_ps(*pI, weight)); - } - - iFrame += frameCount8*8; - } - else #endif -#if defined(MA_SUPPORT_SSE2) - if (ma_channel_router__can_use_sse2(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - __m128 weight = _mm_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]); - ma_uint64 frameCount4 = frameCount/4; - ma_uint64 iFrame4; - - for (iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) { - __m128* pO = (__m128*)ppSamplesOut[iChannelOut] + iFrame4; - __m128* pI = (__m128*)ppSamplesIn [iChannelIn ] + iFrame4; - *pO = _mm_add_ps(*pO, _mm_mul_ps(*pI, 
weight)); - } - - iFrame += frameCount4*4; - } else -#endif - { /* Reference. */ - float weight0 = pRouter->config.weights[iChannelIn][iChannelOut]; - float weight1 = pRouter->config.weights[iChannelIn][iChannelOut]; - float weight2 = pRouter->config.weights[iChannelIn][iChannelOut]; - float weight3 = pRouter->config.weights[iChannelIn][iChannelOut]; - ma_uint64 frameCount4 = frameCount/4; - ma_uint64 iFrame4; - - for (iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) { - ppSamplesOut[iChannelOut][iFrame+0] += ppSamplesIn[iChannelIn][iFrame+0] * weight0; - ppSamplesOut[iChannelOut][iFrame+1] += ppSamplesIn[iChannelIn][iFrame+1] * weight1; - ppSamplesOut[iChannelOut][iFrame+2] += ppSamplesIn[iChannelIn][iFrame+2] * weight2; - ppSamplesOut[iChannelOut][iFrame+3] += ppSamplesIn[iChannelIn][iFrame+3] * weight3; - iFrame += 4; - } - } - - /* Leftover. */ - for (; iFrame < frameCount; ++iFrame) { - ppSamplesOut[iChannelOut][iFrame] += ppSamplesIn[iChannelIn][iFrame] * pRouter->config.weights[iChannelIn][iChannelOut]; - } - } - } - } } -ma_uint64 ma_channel_router_read_deinterleaved(ma_channel_router* pRouter, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData) +#ifdef MA_USE_REFERENCE_CONVERSION_APIS +static MA_INLINE void ma_pcm_f32_to_s16__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - if (pRouter == NULL || ppSamplesOut == NULL) { - return 0; - } - - /* Fast path for a passthrough. */ - if (pRouter->isPassthrough) { - if (frameCount <= 0xFFFFFFFF) { - return (ma_uint32)pRouter->config.onReadDeinterleaved(pRouter, (ma_uint32)frameCount, ppSamplesOut, pUserData); - } else { - float* ppNextSamplesOut[MA_MAX_CHANNELS]; - ma_uint64 totalFramesRead; - - ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(float*) * pRouter->config.channelsOut); - - totalFramesRead = 0; - while (totalFramesRead < frameCount) { - ma_uint32 iChannel; - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > 0xFFFFFFFF) { - framesToReadRightNow = 0xFFFFFFFF; - } - - framesJustRead = (ma_uint32)pRouter->config.onReadDeinterleaved(pRouter, (ma_uint32)framesToReadRightNow, (void**)ppNextSamplesOut, pUserData); - if (framesJustRead == 0) { - break; - } - - totalFramesRead += framesJustRead; - - if (framesJustRead < framesToReadRightNow) { - break; - } + ma_uint64 i; - for (iChannel = 0; iChannel < pRouter->config.channelsOut; ++iChannel) { - ppNextSamplesOut[iChannel] += framesJustRead; - } - } + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; - return totalFramesRead; - } + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; } - /* Slower path for a non-passthrough. 
*/ - { - float* ppNextSamplesOut[MA_MAX_CHANNELS]; - float* ppTemp[MA_MAX_CHANNELS]; - size_t maxBytesToReadPerFrameEachIteration; - size_t maxFramesToReadEachIteration; - ma_uint64 totalFramesRead; - MA_ALIGN(MA_SIMD_ALIGNMENT) float temp[MA_MAX_CHANNELS * 256]; - - ma_assert(sizeof(temp) <= 0xFFFFFFFF); - ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(float*) * pRouter->config.channelsOut); - - - ma_split_buffer(temp, sizeof(temp), pRouter->config.channelsIn, MA_SIMD_ALIGNMENT, (void**)&ppTemp, &maxBytesToReadPerFrameEachIteration); - - maxFramesToReadEachIteration = maxBytesToReadPerFrameEachIteration/sizeof(float); - - totalFramesRead = 0; - while (totalFramesRead < frameCount) { - ma_uint32 iChannel; - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > maxFramesToReadEachIteration) { - framesToReadRightNow = maxFramesToReadEachIteration; - } - - framesJustRead = pRouter->config.onReadDeinterleaved(pRouter, (ma_uint32)framesToReadRightNow, (void**)ppTemp, pUserData); - if (framesJustRead == 0) { - break; - } - - ma_channel_router__do_routing(pRouter, framesJustRead, (float**)ppNextSamplesOut, (const float**)ppTemp); /* <-- Real work is done here. */ - - totalFramesRead += framesJustRead; - if (totalFramesRead < frameCount) { - for (iChannel = 0; iChannel < pRouter->config.channelsIn; iChannel += 1) { - ppNextSamplesOut[iChannel] += framesJustRead; - } - } + for (i = 0; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - if (framesJustRead < framesToReadRightNow) { - break; - } - } +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 32767.5f; /* 0..2 to 0..65535 */ + x = x - 32768.0f; /* 0...65535 to -32768..32767 */ +#else + /* The fast way. */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ +#endif - return totalFramesRead; + dst_s16[i] = (ma_int16)x; } } - -ma_channel_router_config ma_channel_router_config_init(ma_uint32 channelsIn, const ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint32 channelsOut, const ma_channel channelMapOut[MA_MAX_CHANNELS], ma_channel_mix_mode mixingMode, ma_channel_router_read_deinterleaved_proc onRead, void* pUserData) +#else +static MA_INLINE void ma_pcm_f32_to_s16__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_channel_router_config config; - ma_uint32 iChannel; - - ma_zero_object(&config); + ma_uint64 i; + ma_uint64 i4; + ma_uint64 count4; - config.channelsIn = channelsIn; - for (iChannel = 0; iChannel < channelsIn; ++iChannel) { - config.channelMapIn[iChannel] = channelMapIn[iChannel]; - } + ma_int16* dst_s16 = (ma_int16*)dst; + const float* src_f32 = (const float*)src; - config.channelsOut = channelsOut; - for (iChannel = 0; iChannel < channelsOut; ++iChannel) { - config.channelMapOut[iChannel] = channelMapOut[iChannel]; + float ditherMin = 0; + float ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; } - config.mixingMode = mixingMode; - config.onReadDeinterleaved = onRead; - config.pUserData = pUserData; + /* Unrolled. 
*/ + i = 0; + count4 = count >> 2; + for (i4 = 0; i4 < count4; i4 += 1) { + float d0 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d1 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d2 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + float d3 = ma_dither_f32(ditherMode, ditherMin, ditherMax); + + float x0 = src_f32[i+0]; + float x1 = src_f32[i+1]; + float x2 = src_f32[i+2]; + float x3 = src_f32[i+3]; - return config; -} + x0 = x0 + d0; + x1 = x1 + d1; + x2 = x2 + d2; + x3 = x3 + d3; + x0 = ((x0 < -1) ? -1 : ((x0 > 1) ? 1 : x0)); + x1 = ((x1 < -1) ? -1 : ((x1 > 1) ? 1 : x1)); + x2 = ((x2 < -1) ? -1 : ((x2 > 1) ? 1 : x2)); + x3 = ((x3 < -1) ? -1 : ((x3 > 1) ? 1 : x3)); + x0 = x0 * 32767.0f; + x1 = x1 * 32767.0f; + x2 = x2 * 32767.0f; + x3 = x3 * 32767.0f; -/************************************************************************************************************************************************************** + dst_s16[i+0] = (ma_int16)x0; + dst_s16[i+1] = (ma_int16)x1; + dst_s16[i+2] = (ma_int16)x2; + dst_s16[i+3] = (ma_int16)x3; -SRC + i += 4; + } -**************************************************************************************************************************************************************/ -#define ma_floorf(x) ((float)floor((double)(x))) -#define ma_sinf(x) ((float)sin((double)(x))) -#define ma_cosf(x) ((float)cos((double)(x))) + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ -static MA_INLINE double ma_sinc(double x) -{ - if (x != 0) { - return sin(MA_PI_D*x) / (MA_PI_D*x); - } else { - return 1; + dst_s16[i] = (ma_int16)x; } } +#endif -#define ma_sincf(x) ((float)ma_sinc((double)(x))) - - -ma_uint64 ma_src_read_deinterleaved__passthrough(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData); -ma_uint64 ma_src_read_deinterleaved__linear(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData); -ma_uint64 ma_src_read_deinterleaved__sinc(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData); - -void ma_src__build_sinc_table__sinc(ma_src* pSRC) +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s16__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_uint32 i; - - ma_assert(pSRC != NULL); + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; - pSRC->sinc.table[0] = 1.0f; - for (i = 1; i < ma_countof(pSRC->sinc.table); i += 1) { - double x = i*MA_PI_D / MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION; - pSRC->sinc.table[i] = (float)(sin(x)/x); + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; } -} -void ma_src__build_sinc_table__rectangular(ma_src* pSRC) -{ - /* This is the same as the base sinc table. */ - ma_src__build_sinc_table__sinc(pSRC); -} + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; -void ma_src__build_sinc_table__hann(ma_src* pSRC) -{ - ma_uint32 i; + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } + + i = 0; - ma_src__build_sinc_table__sinc(pSRC); + /* SSE2. 
SSE allows us to output 8 s16's at a time which means our loop is unrolled 8 times. */ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + __m128 d0; + __m128 d1; + __m128 x0; + __m128 x1; - for (i = 0; i < ma_countof(pSRC->sinc.table); i += 1) { - double x = pSRC->sinc.table[i]; - double N = MA_SRC_SINC_MAX_WINDOW_WIDTH*2; - double n = ((double)(i) / MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION) + MA_SRC_SINC_MAX_WINDOW_WIDTH; - double w = 0.5 * (1 - cos((2*MA_PI_D*n) / (N))); + if (ditherMode == ma_dither_mode_none) { + d0 = _mm_set1_ps(0); + d1 = _mm_set1_ps(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + d0 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + } else { + d0 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + d1 = _mm_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + } - pSRC->sinc.table[i] = (float)(x * w); - } -} + x0 = *((__m128*)(src_f32 + i) + 0); + x1 = *((__m128*)(src_f32 + i) + 1); -ma_result ma_src_init(const ma_src_config* pConfig, ma_src* pSRC) -{ - if (pSRC == NULL) { - return MA_INVALID_ARGS; - } + x0 = _mm_add_ps(x0, d0); + x1 = _mm_add_ps(x1, d1); - ma_zero_object(pSRC); + x0 = _mm_mul_ps(x0, _mm_set1_ps(32767.0f)); + x1 = _mm_mul_ps(x1, _mm_set1_ps(32767.0f)); - if (pConfig == NULL || pConfig->onReadDeinterleaved == NULL) { - return MA_INVALID_ARGS; - } - if (pConfig->channels == 0 || pConfig->channels > MA_MAX_CHANNELS) { - return MA_INVALID_ARGS; + _mm_stream_si128(((__m128i*)(dst_s16 + i)), _mm_packs_epi32(_mm_cvttps_epi32(x0), _mm_cvttps_epi32(x1))); + + i += 8; } - pSRC->config = *pConfig; - /* SIMD */ - pSRC->useSSE2 = ma_has_sse2() && !pConfig->noSSE2; - pSRC->useAVX2 = ma_has_avx2() && !pConfig->noAVX2; - pSRC->useAVX512 = ma_has_avx512f() && !pConfig->noAVX512; - pSRC->useNEON = ma_has_neon() && !pConfig->noNEON; - - if (pSRC->config.algorithm == ma_src_algorithm_sinc) { - /* Make sure the window width within bounds. */ - if (pSRC->config.sinc.windowWidth == 0) { - pSRC->config.sinc.windowWidth = MA_SRC_SINC_DEFAULT_WINDOW_WIDTH; - } - if (pSRC->config.sinc.windowWidth < MA_SRC_SINC_MIN_WINDOW_WIDTH) { - pSRC->config.sinc.windowWidth = MA_SRC_SINC_MIN_WINDOW_WIDTH; - } - if (pSRC->config.sinc.windowWidth > MA_SRC_SINC_MAX_WINDOW_WIDTH) { - pSRC->config.sinc.windowWidth = MA_SRC_SINC_MAX_WINDOW_WIDTH; - } + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ - /* Set up the lookup table. 
*/ - switch (pSRC->config.sinc.windowFunction) { - case ma_src_sinc_window_function_hann: ma_src__build_sinc_table__hann(pSRC); break; - case ma_src_sinc_window_function_rectangular: ma_src__build_sinc_table__rectangular(pSRC); break; - default: return MA_INVALID_ARGS; /* <-- Hitting this means the window function is unknown to miniaudio. */ - } + dst_s16[i] = (ma_int16)x; } - - return MA_SUCCESS; } - -ma_result ma_src_set_sample_rate(ma_src* pSRC, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_s16__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - if (pSRC == NULL) { - return MA_INVALID_ARGS; - } + ma_uint64 i; + ma_uint64 i16; + ma_uint64 count16; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; - /* Must have a sample rate of > 0. */ - if (sampleRateIn == 0 || sampleRateOut == 0) { - return MA_INVALID_ARGS; + /* Both the input and output buffers need to be aligned to 32 bytes. */ + if ((((ma_uintptr)dst & 31) != 0) || (((ma_uintptr)src & 31) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; } - ma_atomic_exchange_32(&pSRC->config.sampleRateIn, sampleRateIn); - ma_atomic_exchange_32(&pSRC->config.sampleRateOut, sampleRateOut); + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; - return MA_SUCCESS; -} + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } -ma_uint64 ma_src_read_deinterleaved(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData) -{ - ma_src_algorithm algorithm; + i = 0; - if (pSRC == NULL || frameCount == 0 || ppSamplesOut == NULL) { - return 0; - } + /* AVX2. AVX2 allows us to output 16 s16's at a time which means our loop is unrolled 16 times. 
*/ + count16 = count >> 4; + for (i16 = 0; i16 < count16; i16 += 1) { + __m256 d0; + __m256 d1; + __m256 x0; + __m256 x1; + __m256i i0; + __m256i i1; + __m256i p0; + __m256i p1; + __m256i r; - algorithm = pSRC->config.algorithm; + if (ditherMode == ma_dither_mode_none) { + d0 = _mm256_set1_ps(0); + d1 = _mm256_set1_ps(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + d0 = _mm256_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + d1 = _mm256_set_ps( + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax), + ma_dither_f32_rectangle(ditherMin, ditherMax) + ); + } else { + d0 = _mm256_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + d1 = _mm256_set_ps( + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax), + ma_dither_f32_triangle(ditherMin, ditherMax) + ); + } - /* Can use a function pointer for this. */ - switch (algorithm) { - case ma_src_algorithm_none: return ma_src_read_deinterleaved__passthrough(pSRC, frameCount, ppSamplesOut, pUserData); - case ma_src_algorithm_linear: return ma_src_read_deinterleaved__linear( pSRC, frameCount, ppSamplesOut, pUserData); - case ma_src_algorithm_sinc: return ma_src_read_deinterleaved__sinc( pSRC, frameCount, ppSamplesOut, pUserData); - default: break; - } + x0 = *((__m256*)(src_f32 + i) + 0); + x1 = *((__m256*)(src_f32 + i) + 1); - /* Should never get here. */ - return 0; -} + x0 = _mm256_add_ps(x0, d0); + x1 = _mm256_add_ps(x1, d1); -ma_uint64 ma_src_read_deinterleaved__passthrough(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData) -{ - if (frameCount <= 0xFFFFFFFF) { - return pSRC->config.onReadDeinterleaved(pSRC, (ma_uint32)frameCount, ppSamplesOut, pUserData); - } else { - ma_uint32 iChannel; - ma_uint64 totalFramesRead; - float* ppNextSamplesOut[MA_MAX_CHANNELS]; + x0 = _mm256_mul_ps(x0, _mm256_set1_ps(32767.0f)); + x1 = _mm256_mul_ps(x1, _mm256_set1_ps(32767.0f)); - for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) { - ppNextSamplesOut[iChannel] = (float*)ppSamplesOut[iChannel]; - } + /* Computing the final result is a little more complicated for AVX2 than SSE2. 
*/ + i0 = _mm256_cvttps_epi32(x0); + i1 = _mm256_cvttps_epi32(x1); + p0 = _mm256_permute2x128_si256(i0, i1, 0 | 32); + p1 = _mm256_permute2x128_si256(i0, i1, 1 | 48); + r = _mm256_packs_epi32(p0, p1); - totalFramesRead = 0; - while (totalFramesRead < frameCount) { - ma_uint32 framesJustRead; - ma_uint64 framesRemaining = frameCount - totalFramesRead; - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > 0xFFFFFFFF) { - framesToReadRightNow = 0xFFFFFFFF; - } + _mm256_stream_si256(((__m256i*)(dst_s16 + i)), r); - framesJustRead = (ma_uint32)pSRC->config.onReadDeinterleaved(pSRC, (ma_uint32)framesToReadRightNow, (void**)ppNextSamplesOut, pUserData); - if (framesJustRead == 0) { - break; - } + i += 16; + } - totalFramesRead += framesJustRead; - for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) { - ppNextSamplesOut[iChannel] += framesJustRead; - } - if (framesJustRead < framesToReadRightNow) { - break; - } - } + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ - return totalFramesRead; + dst_s16[i] = (ma_int16)x; } } - -ma_uint64 ma_src_read_deinterleaved__linear(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData) +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s16__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - float* ppNextSamplesOut[MA_MAX_CHANNELS]; - float factor; - ma_uint32 maxFrameCountPerChunkIn; - ma_uint64 totalFramesRead; - - ma_assert(pSRC != NULL); - ma_assert(frameCount > 0); - ma_assert(ppSamplesOut != NULL); - - ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(void*) * pSRC->config.channels); - - factor = (float)pSRC->config.sampleRateIn / pSRC->config.sampleRateOut; - maxFrameCountPerChunkIn = ma_countof(pSRC->linear.input[0]); - - totalFramesRead = 0; - while (totalFramesRead < frameCount) { - ma_uint32 iChannel; - float tBeg; - float tEnd; - float tAvailable; - float tNext; - float* ppSamplesFromClient[MA_MAX_CHANNELS]; - ma_uint32 iNextFrame; - ma_uint32 maxOutputFramesToRead; - ma_uint32 maxOutputFramesToRead4; - ma_uint32 framesToReadFromClient; - ma_uint32 framesReadFromClient; - ma_uint64 framesRemaining = frameCount - totalFramesRead; - ma_uint64 framesToRead = framesRemaining; - if (framesToRead > 16384) { - framesToRead = 16384; /* <-- Keep this small because we're using 32-bit floats for calculating sample positions and I don't want to run out of precision with huge sample counts. */ - } - - - /* Read Input Data */ - tBeg = pSRC->linear.timeIn; - tEnd = tBeg + ((ma_int64)framesToRead*factor); /* Cast to int64 required for VC6. */ - - framesToReadFromClient = (ma_uint32)(tEnd) + 1 + 1; /* +1 to make tEnd 1-based and +1 because we always need to an extra sample for interpolation. 
*/ - if (framesToReadFromClient >= maxFrameCountPerChunkIn) { - framesToReadFromClient = maxFrameCountPerChunkIn; - } - - for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) { - ppSamplesFromClient[iChannel] = pSRC->linear.input[iChannel] + pSRC->linear.leftoverFrames; - } - - framesReadFromClient = 0; - if (framesToReadFromClient > pSRC->linear.leftoverFrames) { - framesReadFromClient = (ma_uint32)pSRC->config.onReadDeinterleaved(pSRC, (ma_uint32)framesToReadFromClient - pSRC->linear.leftoverFrames, (void**)ppSamplesFromClient, pUserData); - } - - framesReadFromClient += pSRC->linear.leftoverFrames; /* <-- You can sort of think of it as though we've re-read the leftover samples from the client. */ - if (framesReadFromClient < 2) { - break; - } - - for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) { - ppSamplesFromClient[iChannel] = pSRC->linear.input[iChannel]; - } - - - /* Write Output Data */ - - /* - At this point we have a bunch of frames that the client has given to us for processing. From this we can determine the maximum number of output frames - that can be processed from this input. We want to output as many samples as possible from our input data. - */ - tAvailable = framesReadFromClient - tBeg - 1; /* Subtract 1 because the last input sample is needed for interpolation and cannot be included in the output sample count calculation. */ - - maxOutputFramesToRead = (ma_uint32)(tAvailable / factor); - if (maxOutputFramesToRead == 0) { - maxOutputFramesToRead = 1; - } - if (maxOutputFramesToRead > framesToRead) { - maxOutputFramesToRead = (ma_uint32)framesToRead; - } + ma_uint64 i; + ma_uint64 i8; + ma_uint64 count8; + ma_int16* dst_s16; + const float* src_f32; + float ditherMin; + float ditherMax; - /* Output frames are always read in groups of 4 because I'm planning on using this as a reference for some SIMD-y stuff later. 
*/ - maxOutputFramesToRead4 = maxOutputFramesToRead/4; - for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) { - ma_uint32 iFrameOut; - float t0 = pSRC->linear.timeIn + factor*0; - float t1 = pSRC->linear.timeIn + factor*1; - float t2 = pSRC->linear.timeIn + factor*2; - float t3 = pSRC->linear.timeIn + factor*3; - float t; - - for (iFrameOut = 0; iFrameOut < maxOutputFramesToRead4; iFrameOut += 1) { - float iPrevSample0 = (float)floor(t0); - float iPrevSample1 = (float)floor(t1); - float iPrevSample2 = (float)floor(t2); - float iPrevSample3 = (float)floor(t3); - - float iNextSample0 = iPrevSample0 + 1; - float iNextSample1 = iPrevSample1 + 1; - float iNextSample2 = iPrevSample2 + 1; - float iNextSample3 = iPrevSample3 + 1; - - float alpha0 = t0 - iPrevSample0; - float alpha1 = t1 - iPrevSample1; - float alpha2 = t2 - iPrevSample2; - float alpha3 = t3 - iPrevSample3; - - float prevSample0 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample0]; - float prevSample1 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample1]; - float prevSample2 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample2]; - float prevSample3 = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample3]; - - float nextSample0 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample0]; - float nextSample1 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample1]; - float nextSample2 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample2]; - float nextSample3 = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample3]; + if (!ma_has_neon()) { + return ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + } - ppNextSamplesOut[iChannel][iFrameOut*4 + 0] = ma_mix_f32_fast(prevSample0, nextSample0, alpha0); - ppNextSamplesOut[iChannel][iFrameOut*4 + 1] = ma_mix_f32_fast(prevSample1, nextSample1, alpha1); - ppNextSamplesOut[iChannel][iFrameOut*4 + 2] = ma_mix_f32_fast(prevSample2, nextSample2, alpha2); - ppNextSamplesOut[iChannel][iFrameOut*4 + 3] = ma_mix_f32_fast(prevSample3, nextSample3, alpha3); + /* Both the input and output buffers need to be aligned to 16 bytes. */ + if ((((ma_uintptr)dst & 15) != 0) || (((ma_uintptr)src & 15) != 0)) { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + return; + } - t0 += factor*4; - t1 += factor*4; - t2 += factor*4; - t3 += factor*4; - } + dst_s16 = (ma_int16*)dst; + src_f32 = (const float*)src; - t = pSRC->linear.timeIn + (factor*maxOutputFramesToRead4*4); - for (iFrameOut = (maxOutputFramesToRead4*4); iFrameOut < maxOutputFramesToRead; iFrameOut += 1) { - float iPrevSample = (float)floor(t); - float iNextSample = iPrevSample + 1; - float alpha = t - iPrevSample; - float prevSample; - float nextSample; + ditherMin = 0; + ditherMax = 0; + if (ditherMode != ma_dither_mode_none) { + ditherMin = 1.0f / -32768; + ditherMax = 1.0f / 32767; + } - ma_assert(iPrevSample < ma_countof(pSRC->linear.input[iChannel])); - ma_assert(iNextSample < ma_countof(pSRC->linear.input[iChannel])); + i = 0; - prevSample = ppSamplesFromClient[iChannel][(ma_uint32)iPrevSample]; - nextSample = ppSamplesFromClient[iChannel][(ma_uint32)iNextSample]; + /* NEON. NEON allows us to output 8 s16's at a time which means our loop is unrolled 8 times. 
*/ + count8 = count >> 3; + for (i8 = 0; i8 < count8; i8 += 1) { + float32x4_t d0; + float32x4_t d1; + float32x4_t x0; + float32x4_t x1; + int32x4_t i0; + int32x4_t i1; - ppNextSamplesOut[iChannel][iFrameOut] = ma_mix_f32_fast(prevSample, nextSample, alpha); + if (ditherMode == ma_dither_mode_none) { + d0 = vmovq_n_f32(0); + d1 = vmovq_n_f32(0); + } else if (ditherMode == ma_dither_mode_rectangle) { + float d0v[4]; + d0v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); - t += factor; - } + float d1v[4]; + d1v[0] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_rectangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); + } else { + float d0v[4]; + d0v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d0 = vld1q_f32(d0v); - ppNextSamplesOut[iChannel] += maxOutputFramesToRead; + float d1v[4]; + d1v[0] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[1] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[2] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1v[3] = ma_dither_f32_triangle(ditherMin, ditherMax); + d1 = vld1q_f32(d1v); } - totalFramesRead += maxOutputFramesToRead; - - - /* Residual */ - tNext = pSRC->linear.timeIn + (maxOutputFramesToRead*factor); + x0 = *((float32x4_t*)(src_f32 + i) + 0); + x1 = *((float32x4_t*)(src_f32 + i) + 1); - pSRC->linear.timeIn = tNext; - ma_assert(tNext <= framesReadFromClient+1); + x0 = vaddq_f32(x0, d0); + x1 = vaddq_f32(x1, d1); - iNextFrame = (ma_uint32)floor(tNext); - pSRC->linear.leftoverFrames = framesReadFromClient - iNextFrame; - pSRC->linear.timeIn = tNext - iNextFrame; + x0 = vmulq_n_f32(x0, 32767.0f); + x1 = vmulq_n_f32(x1, 32767.0f); - for (iChannel = 0; iChannel < pSRC->config.channels; ++iChannel) { - ma_uint32 iFrame; - for (iFrame = 0; iFrame < pSRC->linear.leftoverFrames; ++iFrame) { - float sample = ppSamplesFromClient[iChannel][framesReadFromClient-pSRC->linear.leftoverFrames + iFrame]; - ppSamplesFromClient[iChannel][iFrame] = sample; - } - } + i0 = vcvtq_s32_f32(x0); + i1 = vcvtq_s32_f32(x1); + *((int16x8_t*)(dst_s16 + i)) = vcombine_s16(vqmovn_s32(i0), vqmovn_s32(i1)); - - /* Exit the loop if we've found everything from the client. */ - if (framesReadFromClient < framesToReadFromClient) { - break; - } + i += 8; } - return totalFramesRead; -} + /* Leftover. */ + for (; i < count; i += 1) { + float x = src_f32[i]; + x = x + ma_dither_f32(ditherMode, ditherMin, ditherMax); + x = ((x < -1) ? -1 : ((x > 1) ? 
1 : x)); /* clip */ + x = x * 32767.0f; /* -1..1 to -32767..32767 */ -ma_src_config ma_src_config_init_new() -{ - ma_src_config config; - ma_zero_object(&config); - - return config; + dst_s16[i] = (ma_int16)x; + } } +#endif -ma_src_config ma_src_config_init(ma_uint32 sampleRateIn, ma_uint32 sampleRateOut, ma_uint32 channels, ma_src_read_deinterleaved_proc onReadDeinterleaved, void* pUserData) +void ma_pcm_f32_to_s16(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - ma_src_config config = ma_src_config_init_new(); - config.sampleRateIn = sampleRateIn; - config.sampleRateOut = sampleRateOut; - config.channels = channels; - config.onReadDeinterleaved = onReadDeinterleaved; - config.pUserData = pUserData; - - return config; +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s16__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_s16__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_s16__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_s16__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s16__optimized(dst, src, count, ditherMode); + } +#endif } -/************************************************************************************************************************************************************** - -Sinc Sample Rate Conversion -=========================== - -The sinc SRC algorithm uses a windowed sinc to perform interpolation of samples. Currently, miniaudio's implementation supports rectangular and Hann window -methods. - -Whenever an output sample is being computed, it looks at a sub-section of the input samples. I've called this sub-section in the code below the "window", -which I realize is a bit ambigous with the mathematical "window", but it works for me when I need to conceptualize things in my head. The window is made up -of two halves. The first half contains past input samples (initialized to zero), and the second half contains future input samples. As time moves forward -and input samples are consumed, the window moves forward. The larger the window, the better the quality at the expense of slower processing. The window is -limited the range [MA_SRC_SINC_MIN_WINDOW_WIDTH, MA_SRC_SINC_MAX_WINDOW_WIDTH] and defaults to MA_SRC_SINC_DEFAULT_WINDOW_WIDTH. - -Input samples are cached for efficiency (to prevent frequently requesting tiny numbers of samples from the client). When the window gets to the end of the -cache, it's moved back to the start, and more samples are read from the client. If the client has no more data to give, the cache is filled with zeros and -the last of the input samples will be consumed. Once the last of the input samples have been consumed, no more samples will be output. - - -When reading output samples, we always first read whatever is already in the input cache. Only when the cache has been fully consumed do we read more data -from the client. - -To access samples in the input buffer you do so relative to the window. When the window itself is at position 0, the first item in the buffer is accessed -with "windowPos + windowWidth". Generally, to access any sample relative to the window you do "windowPos + windowWidth + sampleIndexRelativeToWindow". 
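/*
A minimal usage sketch, assuming the ma_pcm_f32_to_s16() dispatcher shown above is compiled
into the implementation section. The buffer size, the helper function name and the choice of
ma_dither_mode_none are illustrative assumptions, not part of the patch. Per the dispatcher,
the SSE2/AVX2/NEON variant is chosen by MA_PREFERRED_SIMD plus a runtime CPU check, with the
optimized scalar path as the fallback.
*/
#if 0   /* illustrative only */
static void example_convert_f32_block_to_s16(void)
{
    float    in[256];   /* interleaved f32 samples, expected in the range -1..1 */
    ma_int16 out[256];  /* destination; same number of samples as the source */

    /* ... fill `in` with audio data ... */

    /* The count argument is a sample count (frames * channels), not a frame count. */
    ma_pcm_f32_to_s16(out, in, 256, ma_dither_mode_none);
}
#endif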
+static MA_INLINE void ma_pcm_f32_to_s24__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_uint8* dst_s24 = (ma_uint8*)dst; + const float* src_f32 = (const float*)src; -**************************************************************************************************************************************************************/ + ma_uint64 i; + for (i = 0; i < count; i += 1) { + ma_int32 r; + float x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ -/* Comment this to disable interpolation of table lookups. Less accurate, but faster. */ -#define MA_USE_SINC_TABLE_INTERPOLATION +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 8388607.5f; /* 0..2 to 0..16777215 */ + x = x - 8388608.0f; /* 0..16777215 to -8388608..8388607 */ +#else + /* The fast way. */ + x = x * 8388607.0f; /* -1..1 to -8388607..8388607 */ +#endif -/* Retrieves a sample from the input buffer's window. Values >= 0 retrieve future samples. Negative values return past samples. */ -static MA_INLINE float ma_src_sinc__get_input_sample_from_window(const ma_src* pSRC, ma_uint32 channel, ma_uint32 windowPosInSamples, ma_int32 sampleIndex) -{ - ma_assert(pSRC != NULL); - ma_assert(channel < pSRC->config.channels); - ma_assert(sampleIndex >= -(ma_int32)pSRC->config.sinc.windowWidth); - ma_assert(sampleIndex < (ma_int32)pSRC->config.sinc.windowWidth); + r = (ma_int32)x; + dst_s24[(i*3)+0] = (ma_uint8)((r & 0x0000FF) >> 0); + dst_s24[(i*3)+1] = (ma_uint8)((r & 0x00FF00) >> 8); + dst_s24[(i*3)+2] = (ma_uint8)((r & 0xFF0000) >> 16); + } - /* The window should always be contained within the input cache. */ - ma_assert(windowPosInSamples < ma_countof(pSRC->sinc.input[0]) - pSRC->config.sinc.windowWidth); - - return pSRC->sinc.input[channel][windowPosInSamples + pSRC->config.sinc.windowWidth + sampleIndex]; + (void)ditherMode; /* No dithering for f32 -> s24. 
*/ } -static MA_INLINE float ma_src_sinc__interpolation_factor(const ma_src* pSRC, float x) +static MA_INLINE void ma_pcm_f32_to_s24__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - float xabs; - ma_int32 ixabs; - - ma_assert(pSRC != NULL); - - xabs = (float)fabs(x); - xabs = xabs * MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION; - ixabs = (ma_int32)xabs; - -#if defined(MA_USE_SINC_TABLE_INTERPOLATION) - { - float a = xabs - ixabs; - return ma_mix_f32_fast(pSRC->sinc.table[ixabs], pSRC->sinc.table[ixabs+1], a); - } -#else - return pSRC->sinc.table[ixabs]; -#endif + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); } #if defined(MA_SUPPORT_SSE2) -static MA_INLINE __m128 ma_fabsf_sse2(__m128 x) +static MA_INLINE void ma_pcm_f32_to_s24__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return _mm_and_ps(_mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)), x); + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); } - -static MA_INLINE __m128 ma_truncf_sse2(__m128 x) +#endif +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_s24__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return _mm_cvtepi32_ps(_mm_cvttps_epi32(x)); + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); } - -static MA_INLINE __m128 ma_src_sinc__interpolation_factor__sse2(const ma_src* pSRC, __m128 x) +#endif +#if defined(MA_SUPPORT_NEON) +static MA_INLINE void ma_pcm_f32_to_s24__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - __m128 resolution128; - __m128 xabs; - __m128i ixabs; - __m128 lo; - __m128 hi; - __m128 a; - __m128 r; - int* ixabsv; - - resolution128 = _mm_set1_ps(MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION); - xabs = ma_fabsf_sse2(x); - xabs = _mm_mul_ps(xabs, resolution128); - ixabs = _mm_cvttps_epi32(xabs); - - ixabsv = (int*)&ixabs; - - lo = _mm_set_ps( - pSRC->sinc.table[ixabsv[3]], - pSRC->sinc.table[ixabsv[2]], - pSRC->sinc.table[ixabsv[1]], - pSRC->sinc.table[ixabsv[0]] - ); - - hi = _mm_set_ps( - pSRC->sinc.table[ixabsv[3]+1], - pSRC->sinc.table[ixabsv[2]+1], - pSRC->sinc.table[ixabsv[1]+1], - pSRC->sinc.table[ixabsv[0]+1] - ); - - a = _mm_sub_ps(xabs, _mm_cvtepi32_ps(ixabs)); - r = ma_mix_f32_fast__sse2(lo, hi, a); - - return r; + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); } #endif -#if defined(MA_SUPPORT_AVX2) -static MA_INLINE __m256 ma_fabsf_avx2(__m256 x) +void ma_pcm_f32_to_s24(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return _mm256_and_ps(_mm256_castsi256_ps(_mm256_set1_epi32(0x7FFFFFFF)), x); +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s24__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_s24__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_s24__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_s24__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s24__optimized(dst, src, count, ditherMode); + } +#endif } -#if 0 -static MA_INLINE __m256 ma_src_sinc__interpolation_factor__avx2(const ma_src* pSRC, __m256 x) -{ - __m256 resolution256 = _mm256_set1_ps(MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION); - __m256 xabs = ma_fabsf_avx2(x); - xabs = _mm256_mul_ps(xabs, resolution256); - - __m256i ixabs = _mm256_cvttps_epi32(xabs); - __m256 a = _mm256_sub_ps(xabs, 
_mm256_cvtepi32_ps(ixabs)); +static MA_INLINE void ma_pcm_f32_to_s32__reference(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_int32* dst_s32 = (ma_int32*)dst; + const float* src_f32 = (const float*)src; - - int* ixabsv = (int*)&ixabs; + ma_uint32 i; + for (i = 0; i < count; i += 1) { + double x = src_f32[i]; + x = ((x < -1) ? -1 : ((x > 1) ? 1 : x)); /* clip */ - __m256 lo = _mm256_set_ps( - pSRC->sinc.table[ixabsv[7]], - pSRC->sinc.table[ixabsv[6]], - pSRC->sinc.table[ixabsv[5]], - pSRC->sinc.table[ixabsv[4]], - pSRC->sinc.table[ixabsv[3]], - pSRC->sinc.table[ixabsv[2]], - pSRC->sinc.table[ixabsv[1]], - pSRC->sinc.table[ixabsv[0]] - ); - - __m256 hi = _mm256_set_ps( - pSRC->sinc.table[ixabsv[7]+1], - pSRC->sinc.table[ixabsv[6]+1], - pSRC->sinc.table[ixabsv[5]+1], - pSRC->sinc.table[ixabsv[4]+1], - pSRC->sinc.table[ixabsv[3]+1], - pSRC->sinc.table[ixabsv[2]+1], - pSRC->sinc.table[ixabsv[1]+1], - pSRC->sinc.table[ixabsv[0]+1] - ); +#if 0 + /* The accurate way. */ + x = x + 1; /* -1..1 to 0..2 */ + x = x * 2147483647.5; /* 0..2 to 0..4294967295 */ + x = x - 2147483648.0; /* 0...4294967295 to -2147483648..2147483647 */ +#else + /* The fast way. */ + x = x * 2147483647.0; /* -1..1 to -2147483647..2147483647 */ +#endif - __m256 r = ma_mix_f32_fast__avx2(lo, hi, a); + dst_s32[i] = (ma_int32)x; + } - return r; + (void)ditherMode; /* No dithering for f32 -> s32. */ } -#endif -#endif - -#if defined(MA_SUPPORT_NEON) -static MA_INLINE float32x4_t ma_fabsf_neon(float32x4_t x) +static MA_INLINE void ma_pcm_f32_to_s32__optimized(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - return vabdq_f32(vmovq_n_f32(0), x); + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); } -static MA_INLINE float32x4_t ma_src_sinc__interpolation_factor__neon(const ma_src* pSRC, float32x4_t x) +#if defined(MA_SUPPORT_SSE2) +static MA_INLINE void ma_pcm_f32_to_s32__sse2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - float32x4_t xabs; - int32x4_t ixabs; - float32x4_t a; - float32x4_t r; - int* ixabsv; - float lo[4]; - float hi[4]; - - xabs = ma_fabsf_neon(x); - xabs = vmulq_n_f32(xabs, MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION); - ixabs = vcvtq_s32_f32(xabs); - - ixabsv = (int*)&ixabs; - - lo[0] = pSRC->sinc.table[ixabsv[0]]; - lo[1] = pSRC->sinc.table[ixabsv[1]]; - lo[2] = pSRC->sinc.table[ixabsv[2]]; - lo[3] = pSRC->sinc.table[ixabsv[3]]; - - hi[0] = pSRC->sinc.table[ixabsv[0]+1]; - hi[1] = pSRC->sinc.table[ixabsv[1]+1]; - hi[2] = pSRC->sinc.table[ixabsv[2]+1]; - hi[3] = pSRC->sinc.table[ixabsv[3]+1]; - - a = vsubq_f32(xabs, vcvtq_f32_s32(ixabs)); - r = ma_mix_f32_fast__neon(vld1q_f32(lo), vld1q_f32(hi), a); - - return r; + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); } #endif - -ma_uint64 ma_src_read_deinterleaved__sinc(ma_src* pSRC, ma_uint64 frameCount, void** ppSamplesOut, void* pUserData) +#if defined(MA_SUPPORT_AVX2) +static MA_INLINE void ma_pcm_f32_to_s32__avx2(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) { - float factor; - float inverseFactor; - ma_int32 windowWidth; - ma_int32 windowWidth2; - ma_int32 windowWidthSIMD; - ma_int32 windowWidthSIMD2; - float* ppNextSamplesOut[MA_MAX_CHANNELS]; - float _windowSamplesUnaligned[MA_SRC_SINC_MAX_WINDOW_WIDTH*2 + MA_SIMD_ALIGNMENT]; - float* windowSamples; - float _iWindowFUnaligned[MA_SRC_SINC_MAX_WINDOW_WIDTH*2 + MA_SIMD_ALIGNMENT]; - float* iWindowF; - ma_int32 i; - ma_uint64 totalOutputFramesRead; - - ma_assert(pSRC != NULL); - 
ma_assert(frameCount > 0); - ma_assert(ppSamplesOut != NULL); - - factor = (float)pSRC->config.sampleRateIn / pSRC->config.sampleRateOut; - inverseFactor = 1/factor; - - windowWidth = (ma_int32)pSRC->config.sinc.windowWidth; - windowWidth2 = windowWidth*2; - - /* - There are cases where it's actually more efficient to increase the window width so that it's aligned with the respective - SIMD pipeline being used. - */ - windowWidthSIMD = windowWidth; - if (pSRC->useNEON) { - windowWidthSIMD = (windowWidthSIMD + 1) & ~(1); - } else if (pSRC->useAVX512) { - windowWidthSIMD = (windowWidthSIMD + 7) & ~(7); - } else if (pSRC->useAVX2) { - windowWidthSIMD = (windowWidthSIMD + 3) & ~(3); - } else if (pSRC->useSSE2) { - windowWidthSIMD = (windowWidthSIMD + 1) & ~(1); - } - - windowWidthSIMD2 = windowWidthSIMD*2; - (void)windowWidthSIMD2; /* <-- Silence a warning when SIMD is disabled. */ - - ma_copy_memory(ppNextSamplesOut, ppSamplesOut, sizeof(void*) * pSRC->config.channels); - - windowSamples = (float*)(((ma_uintptr)_windowSamplesUnaligned + MA_SIMD_ALIGNMENT-1) & ~(MA_SIMD_ALIGNMENT-1)); - ma_zero_memory(windowSamples, MA_SRC_SINC_MAX_WINDOW_WIDTH*2 * sizeof(float)); - - iWindowF = (float*)(((ma_uintptr)_iWindowFUnaligned + MA_SIMD_ALIGNMENT-1) & ~(MA_SIMD_ALIGNMENT-1)); - ma_zero_memory(iWindowF, MA_SRC_SINC_MAX_WINDOW_WIDTH*2 * sizeof(float)); - - for (i = 0; i < windowWidth2; ++i) { - iWindowF[i] = (float)(i - windowWidth); - } - - totalOutputFramesRead = 0; - while (totalOutputFramesRead < frameCount) { - ma_uint32 maxInputSamplesAvailableInCache; - float timeInBeg; - float timeInEnd; - ma_uint64 maxOutputFramesToRead; - ma_uint64 outputFramesRemaining; - ma_uint64 outputFramesToRead; - ma_uint32 iChannel; - ma_uint32 prevWindowPosInSamples; - ma_uint32 availableOutputFrames; - - /* - The maximum number of frames we can read this iteration depends on how many input samples we have available to us. This is the number - of input samples between the end of the window and the end of the cache. - */ - maxInputSamplesAvailableInCache = ma_countof(pSRC->sinc.input[0]) - (pSRC->config.sinc.windowWidth*2) - pSRC->sinc.windowPosInSamples; - if (maxInputSamplesAvailableInCache > pSRC->sinc.inputFrameCount) { - maxInputSamplesAvailableInCache = pSRC->sinc.inputFrameCount; - } - - /* Never consume the tail end of the input data if requested. */ - if (pSRC->config.neverConsumeEndOfInput) { - if (maxInputSamplesAvailableInCache >= pSRC->config.sinc.windowWidth) { - maxInputSamplesAvailableInCache -= pSRC->config.sinc.windowWidth; - } else { - maxInputSamplesAvailableInCache = 0; - } - } - - timeInBeg = pSRC->sinc.timeIn; - timeInEnd = (float)(pSRC->sinc.windowPosInSamples + maxInputSamplesAvailableInCache); - - ma_assert(timeInBeg >= 0); - ma_assert(timeInBeg <= timeInEnd); - - maxOutputFramesToRead = (ma_uint64)(((timeInEnd - timeInBeg) * inverseFactor)); - - outputFramesRemaining = frameCount - totalOutputFramesRead; - outputFramesToRead = outputFramesRemaining; - if (outputFramesToRead > maxOutputFramesToRead) { - outputFramesToRead = maxOutputFramesToRead; - } - - for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) { - /* Do SRC. */ - float timeIn = timeInBeg; - ma_uint32 iSample; - for (iSample = 0; iSample < outputFramesToRead; iSample += 1) { - float sampleOut = 0; - float iTimeInF = ma_floorf(timeIn); - ma_uint32 iTimeIn = (ma_uint32)iTimeInF; - ma_int32 iWindow = 0; - float tScalar; - - /* Pre-load the window samples into an aligned buffer to begin with. 
Need to put these into an aligned buffer to make SIMD easier. */ - windowSamples[0] = 0; /* <-- The first sample is always zero. */ - for (i = 1; i < windowWidth2; ++i) { - windowSamples[i] = pSRC->sinc.input[iChannel][iTimeIn + i]; - } - -#if defined(MA_SUPPORT_AVX2) || defined(MA_SUPPORT_AVX512) - if (pSRC->useAVX2 || pSRC->useAVX512) { - __m256i ixabs[MA_SRC_SINC_MAX_WINDOW_WIDTH*2/8]; - __m256 a[MA_SRC_SINC_MAX_WINDOW_WIDTH*2/8]; - __m256 resolution256; - __m256 t; - __m256 r; - ma_int32 windowWidth8; - ma_int32 iWindow8; - - resolution256 = _mm256_set1_ps(MA_SRC_SINC_LOOKUP_TABLE_RESOLUTION); - - t = _mm256_set1_ps((timeIn - iTimeInF)); - r = _mm256_set1_ps(0); - - windowWidth8 = windowWidthSIMD2 >> 3; - for (iWindow8 = 0; iWindow8 < windowWidth8; iWindow8 += 1) { - __m256 w = *((__m256*)iWindowF + iWindow8); - - __m256 xabs = _mm256_sub_ps(t, w); - xabs = ma_fabsf_avx2(xabs); - xabs = _mm256_mul_ps(xabs, resolution256); - - ixabs[iWindow8] = _mm256_cvttps_epi32(xabs); - a[iWindow8] = _mm256_sub_ps(xabs, _mm256_cvtepi32_ps(ixabs[iWindow8])); - } - - for (iWindow8 = 0; iWindow8 < windowWidth8; iWindow8 += 1) { - int* ixabsv = (int*)&ixabs[iWindow8]; - - __m256 lo = _mm256_set_ps( - pSRC->sinc.table[ixabsv[7]], - pSRC->sinc.table[ixabsv[6]], - pSRC->sinc.table[ixabsv[5]], - pSRC->sinc.table[ixabsv[4]], - pSRC->sinc.table[ixabsv[3]], - pSRC->sinc.table[ixabsv[2]], - pSRC->sinc.table[ixabsv[1]], - pSRC->sinc.table[ixabsv[0]] - ); - - __m256 hi = _mm256_set_ps( - pSRC->sinc.table[ixabsv[7]+1], - pSRC->sinc.table[ixabsv[6]+1], - pSRC->sinc.table[ixabsv[5]+1], - pSRC->sinc.table[ixabsv[4]+1], - pSRC->sinc.table[ixabsv[3]+1], - pSRC->sinc.table[ixabsv[2]+1], - pSRC->sinc.table[ixabsv[1]+1], - pSRC->sinc.table[ixabsv[0]+1] - ); - - __m256 s = *((__m256*)windowSamples + iWindow8); - r = _mm256_add_ps(r, _mm256_mul_ps(s, ma_mix_f32_fast__avx2(lo, hi, a[iWindow8]))); - } - - /* Horizontal add. 
*/ - __m256 x = _mm256_hadd_ps(r, _mm256_permute2f128_ps(r, r, 1)); - x = _mm256_hadd_ps(x, x); - x = _mm256_hadd_ps(x, x); - sampleOut += _mm_cvtss_f32(_mm256_castps256_ps128(x)); - - iWindow += windowWidth8 * 8; - } - else -#endif -#if defined(MA_SUPPORT_SSE2) - if (pSRC->useSSE2) { - __m128 t = _mm_set1_ps((timeIn - iTimeInF)); - __m128 r = _mm_set1_ps(0); - - ma_int32 windowWidth4 = windowWidthSIMD2 >> 2; - ma_int32 iWindow4; - for (iWindow4 = 0; iWindow4 < windowWidth4; iWindow4 += 1) { - __m128* s = (__m128*)windowSamples + iWindow4; - __m128* w = (__m128*)iWindowF + iWindow4; - - __m128 a = ma_src_sinc__interpolation_factor__sse2(pSRC, _mm_sub_ps(t, *w)); - r = _mm_add_ps(r, _mm_mul_ps(*s, a)); - } - - sampleOut += ((float*)(&r))[0]; - sampleOut += ((float*)(&r))[1]; - sampleOut += ((float*)(&r))[2]; - sampleOut += ((float*)(&r))[3]; - - iWindow += windowWidth4 * 4; - } - else + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} #endif #if defined(MA_SUPPORT_NEON) - if (pSRC->useNEON) { - float32x4_t t = vmovq_n_f32((timeIn - iTimeInF)); - float32x4_t r = vmovq_n_f32(0); - - ma_int32 windowWidth4 = windowWidthSIMD2 >> 2; - ma_int32 iWindow4; - for (iWindow4 = 0; iWindow4 < windowWidth4; iWindow4 += 1) { - float32x4_t* s = (float32x4_t*)windowSamples + iWindow4; - float32x4_t* w = (float32x4_t*)iWindowF + iWindow4; - - float32x4_t a = ma_src_sinc__interpolation_factor__neon(pSRC, vsubq_f32(t, *w)); - r = vaddq_f32(r, vmulq_f32(*s, a)); - } - - sampleOut += ((float*)(&r))[0]; - sampleOut += ((float*)(&r))[1]; - sampleOut += ((float*)(&r))[2]; - sampleOut += ((float*)(&r))[3]; - - iWindow += windowWidth4 * 4; - } - else +static MA_INLINE void ma_pcm_f32_to_s32__neon(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); +} #endif - { - iWindow += 1; /* The first one is a dummy for SIMD alignment purposes. Skip it. */ - } - /* Non-SIMD/Reference implementation. */ - tScalar = (timeIn - iTimeIn); - for (; iWindow < windowWidth2; iWindow += 1) { - float s = windowSamples[iWindow]; - float w = iWindowF[iWindow]; - - float a = ma_src_sinc__interpolation_factor(pSRC, (tScalar - w)); - float r = s * a; - - sampleOut += r; - } - - ppNextSamplesOut[iChannel][iSample] = (float)sampleOut; - - timeIn += factor; - } - - ppNextSamplesOut[iChannel] += outputFramesToRead; +void ma_pcm_f32_to_s32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_f32_to_s32__reference(dst, src, count, ditherMode); +#else + # if MA_PREFERRED_SIMD == MA_SIMD_AVX2 + if (ma_has_avx2()) { + ma_pcm_f32_to_s32__avx2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_SSE2 + if (ma_has_sse2()) { + ma_pcm_f32_to_s32__sse2(dst, src, count, ditherMode); + } else + #elif MA_PREFERRED_SIMD == MA_SIMD_NEON + if (ma_has_neon()) { + ma_pcm_f32_to_s32__neon(dst, src, count, ditherMode); + } else + #endif + { + ma_pcm_f32_to_s32__optimized(dst, src, count, ditherMode); } +#endif +} - totalOutputFramesRead += outputFramesToRead; - - prevWindowPosInSamples = pSRC->sinc.windowPosInSamples; - - pSRC->sinc.timeIn += ((ma_int64)outputFramesToRead * factor); /* Cast to int64 required for VC6. 
*/ - pSRC->sinc.windowPosInSamples = (ma_uint32)pSRC->sinc.timeIn; - pSRC->sinc.inputFrameCount -= pSRC->sinc.windowPosInSamples - prevWindowPosInSamples; - /* If the window has reached a point where we cannot read a whole output sample it needs to be moved back to the start. */ - availableOutputFrames = (ma_uint32)((timeInEnd - pSRC->sinc.timeIn) * inverseFactor); +void ma_pcm_f32_to_f32(void* dst, const void* src, ma_uint64 count, ma_dither_mode ditherMode) +{ + (void)ditherMode; - if (availableOutputFrames == 0) { - size_t samplesToMove = ma_countof(pSRC->sinc.input[0]) - pSRC->sinc.windowPosInSamples; + ma_copy_memory_64(dst, src, count * sizeof(float)); +} - pSRC->sinc.timeIn -= ma_floorf(pSRC->sinc.timeIn); - pSRC->sinc.windowPosInSamples = 0; - /* Move everything from the end of the cache up to the front. */ - for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) { - memmove(pSRC->sinc.input[iChannel], pSRC->sinc.input[iChannel] + ma_countof(pSRC->sinc.input[iChannel]) - samplesToMove, samplesToMove * sizeof(*pSRC->sinc.input[iChannel])); - } - } +static void ma_pcm_interleave_f32__reference(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + float* dst_f32 = (float*)dst; + const float** src_f32 = (const float**)src; - /* Read more data from the client if required. */ - if (pSRC->isEndOfInputLoaded) { - pSRC->isEndOfInputLoaded = MA_FALSE; - break; + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iFrame*channels + iChannel] = src_f32[iChannel][iFrame]; } + } +} - /* - Everything beyond this point is reloading. If we're at the end of the input data we do _not_ want to try reading any more in this function call. If the - caller wants to keep trying, they can reload their internal data sources and call this function again. We should never be - */ - ma_assert(pSRC->isEndOfInputLoaded == MA_FALSE); - - if (pSRC->sinc.inputFrameCount <= pSRC->config.sinc.windowWidth || availableOutputFrames == 0) { - float* ppInputDst[MA_MAX_CHANNELS] = {0}; - ma_uint32 framesToReadFromClient; - ma_uint32 framesReadFromClient; - ma_uint32 leftoverFrames; - - for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) { - ppInputDst[iChannel] = pSRC->sinc.input[iChannel] + pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount; - } - - /* Now read data from the client. 
*/ - framesToReadFromClient = ma_countof(pSRC->sinc.input[0]) - (pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount); +static void ma_pcm_interleave_f32__optimized(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +} - framesReadFromClient = 0; - if (framesToReadFromClient > 0) { - framesReadFromClient = pSRC->config.onReadDeinterleaved(pSRC, framesToReadFromClient, (void**)ppInputDst, pUserData); - } +void ma_pcm_interleave_f32(void* dst, const void** src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_interleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_interleave_f32__optimized(dst, src, frameCount, channels); +#endif +} - if (framesReadFromClient != framesToReadFromClient) { - pSRC->isEndOfInputLoaded = MA_TRUE; - } else { - pSRC->isEndOfInputLoaded = MA_FALSE; - } - if (framesReadFromClient != 0) { - pSRC->sinc.inputFrameCount += framesReadFromClient; - } else { - /* We couldn't get anything more from the client. If no more output samples can be computed from the available input samples we need to return. */ - if (pSRC->config.neverConsumeEndOfInput) { - if ((pSRC->sinc.inputFrameCount * inverseFactor) <= pSRC->config.sinc.windowWidth) { - break; - } - } else { - if ((pSRC->sinc.inputFrameCount * inverseFactor) < 1) { - break; - } - } - } +static void ma_pcm_deinterleave_f32__reference(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + float** dst_f32 = (float**)dst; + const float* src_f32 = (const float*)src; - /* Anything left over in the cache must be set to zero. */ - leftoverFrames = ma_countof(pSRC->sinc.input[0]) - (pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount); - if (leftoverFrames > 0) { - for (iChannel = 0; iChannel < pSRC->config.channels; iChannel += 1) { - ma_zero_memory(pSRC->sinc.input[iChannel] + pSRC->config.sinc.windowWidth + pSRC->sinc.inputFrameCount, leftoverFrames * sizeof(float)); - } - } + ma_uint64 iFrame; + for (iFrame = 0; iFrame < frameCount; iFrame += 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; iChannel += 1) { + dst_f32[iChannel][iFrame] = src_f32[iFrame*channels + iChannel]; } } - - return totalOutputFramesRead; } +static void ma_pcm_deinterleave_f32__optimized(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +} +void ma_pcm_deinterleave_f32(void** dst, const void* src, ma_uint64 frameCount, ma_uint32 channels) +{ +#ifdef MA_USE_REFERENCE_CONVERSION_APIS + ma_pcm_deinterleave_f32__reference(dst, src, frameCount, channels); +#else + ma_pcm_deinterleave_f32__optimized(dst, src, frameCount, channels); +#endif +} -/************************************************************************************************************************************************************** - -Format Conversion -**************************************************************************************************************************************************************/ void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, ma_uint64 sampleCount, ma_dither_mode ditherMode) { if (formatOut == formatIn) { @@ -31992,6 +32834,11 @@ void ma_pcm_convert(void* pOut, ma_format formatOut, const void* pIn, ma_format } } +void ma_convert_pcm_frames_format(void* pOut, ma_format formatOut, const void* pIn, ma_format formatIn, 
ma_uint64 frameCount, ma_uint32 channels, ma_dither_mode ditherMode) +{ + ma_pcm_convert(pOut, formatOut, pIn, formatIn, frameCount * channels, ditherMode); +} + void ma_deinterleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames) { if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == NULL) { @@ -32090,610 +32937,721 @@ void ma_interleave_pcm_frames(ma_format format, ma_uint32 channels, ma_uint64 fr -typedef struct +/************************************************************************************************************************************************************** + +Channel Maps + +**************************************************************************************************************************************************************/ +static void ma_get_standard_channel_map_microsoft(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + /* Based off the speaker configurations mentioned here: https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ksmedia/ns-ksmedia-ksaudio_channel_config */ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + } break; + + case 3: /* Not defined, but best guess. */ + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { +#ifndef MA_USE_QUAD_MICROSOFT_CHANNEL_MAP + /* Surround. Using the Surround profile has the advantage of the 3rd channel (MA_CHANNEL_FRONT_CENTER) mapping nicely with higher channel counts. */ + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_CENTER; +#else + /* Quad. */ + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; +#endif + } break; + + case 5: /* Not defined, but best guess. */ + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_SIDE_LEFT; + channelMap[5] = MA_CHANNEL_SIDE_RIGHT; + } break; + + case 7: /* Not defined, but best guess. */ + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_CENTER; + channelMap[5] = MA_CHANNEL_SIDE_LEFT; + channelMap[6] = MA_CHANNEL_SIDE_RIGHT; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_LEFT; + channelMap[5] = MA_CHANNEL_BACK_RIGHT; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } + + /* Remainder. 
*/ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} + +static void ma_get_standard_channel_map_alsa(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_pcm_converter* pDSP; - void* pUserDataForClient; -} ma_pcm_converter_callback_data; + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; + + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } break; + + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + } break; + + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + channelMap[6] = MA_CHANNEL_BACK_CENTER; + } break; + + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } + + /* Remainder. 
*/ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); + } + } +} -ma_uint32 ma_pcm_converter__pre_format_converter_on_read(ma_format_converter* pConverter, ma_uint32 frameCount, void* pFramesOut, void* pUserData) +static void ma_get_standard_channel_map_rfc3551(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_pcm_converter_callback_data* pData; - ma_pcm_converter* pDSP; + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; + + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; - (void)pConverter; + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_BACK_CENTER; + } break; - pData = (ma_pcm_converter_callback_data*)pUserData; - ma_assert(pData != NULL); + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; - pDSP = pData->pDSP; - ma_assert(pDSP != NULL); + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_SIDE_LEFT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_FRONT_RIGHT; + channelMap[4] = MA_CHANNEL_SIDE_RIGHT; + channelMap[5] = MA_CHANNEL_BACK_CENTER; + } break; + } - return pDSP->onRead(pDSP, pFramesOut, frameCount, pData->pUserDataForClient); + /* Remainder. */ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6)); + } + } } -ma_uint32 ma_pcm_converter__post_format_converter_on_read(ma_format_converter* pConverter, ma_uint32 frameCount, void* pFramesOut, void* pUserData) +static void ma_get_standard_channel_map_flac(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_pcm_converter_callback_data* pData; - ma_pcm_converter* pDSP; - - (void)pConverter; - - pData = (ma_pcm_converter_callback_data*)pUserData; - ma_assert(pData != NULL); + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; - pDSP = pData->pDSP; - ma_assert(pDSP != NULL); + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; - /* When this version of this callback is used it means we're reading directly from the client. 
*/ - ma_assert(pDSP->isPreFormatConversionRequired == MA_FALSE); - ma_assert(pDSP->isChannelRoutingRequired == MA_FALSE); - ma_assert(pDSP->isSRCRequired == MA_FALSE); + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; - return pDSP->onRead(pDSP, pFramesOut, frameCount, pData->pUserDataForClient); -} + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; -ma_uint32 ma_pcm_converter__post_format_converter_on_read_deinterleaved(ma_format_converter* pConverter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData) -{ - ma_pcm_converter_callback_data* pData; - ma_pcm_converter* pDSP; + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; - (void)pConverter; + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_LEFT; + channelMap[5] = MA_CHANNEL_BACK_RIGHT; + } break; - pData = (ma_pcm_converter_callback_data*)pUserData; - ma_assert(pData != NULL); + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_CENTER; + channelMap[5] = MA_CHANNEL_SIDE_LEFT; + channelMap[6] = MA_CHANNEL_SIDE_RIGHT; + } break; - pDSP = pData->pDSP; - ma_assert(pDSP != NULL); + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + channelMap[3] = MA_CHANNEL_LFE; + channelMap[4] = MA_CHANNEL_BACK_LEFT; + channelMap[5] = MA_CHANNEL_BACK_RIGHT; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; + } - if (!pDSP->isChannelRoutingAtStart) { - return (ma_uint32)ma_channel_router_read_deinterleaved(&pDSP->channelRouter, frameCount, ppSamplesOut, pUserData); - } else { - if (pDSP->isSRCRequired) { - return (ma_uint32)ma_src_read_deinterleaved(&pDSP->src, frameCount, ppSamplesOut, pUserData); - } else { - return (ma_uint32)ma_channel_router_read_deinterleaved(&pDSP->channelRouter, frameCount, ppSamplesOut, pUserData); + /* Remainder. */ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); } } } -ma_uint32 ma_pcm_converter__src_on_read_deinterleaved(ma_src* pSRC, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData) +static void ma_get_standard_channel_map_vorbis(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_pcm_converter_callback_data* pData; - ma_pcm_converter* pDSP; - - (void)pSRC; + /* In Vorbis' type 0 channel mapping, the first two channels are not always the standard left/right - it will have the center speaker where the right usually goes. Why?! 
*/ + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; - pData = (ma_pcm_converter_callback_data*)pUserData; - ma_assert(pData != NULL); + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; - pDSP = pData->pDSP; - ma_assert(pDSP != NULL); + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + } break; - /* If the channel routing stage is at the front we need to read from that. Otherwise we read from the pre format converter. */ - if (pDSP->isChannelRoutingAtStart) { - return (ma_uint32)ma_channel_router_read_deinterleaved(&pDSP->channelRouter, frameCount, ppSamplesOut, pUserData); - } else { - return (ma_uint32)ma_format_converter_read_deinterleaved(&pDSP->formatConverterIn, frameCount, ppSamplesOut, pUserData); - } -} + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; -ma_uint32 ma_pcm_converter__channel_router_on_read_deinterleaved(ma_channel_router* pRouter, ma_uint32 frameCount, void** ppSamplesOut, void* pUserData) -{ - ma_pcm_converter_callback_data* pData; - ma_pcm_converter* pDSP; + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + } break; - (void)pRouter; + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_BACK_LEFT; + channelMap[4] = MA_CHANNEL_BACK_RIGHT; + channelMap[5] = MA_CHANNEL_LFE; + } break; - pData = (ma_pcm_converter_callback_data*)pUserData; - ma_assert(pData != NULL); + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_SIDE_LEFT; + channelMap[4] = MA_CHANNEL_SIDE_RIGHT; + channelMap[5] = MA_CHANNEL_BACK_CENTER; + channelMap[6] = MA_CHANNEL_LFE; + } break; - pDSP = pData->pDSP; - ma_assert(pDSP != NULL); + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_CENTER; + channelMap[2] = MA_CHANNEL_FRONT_RIGHT; + channelMap[3] = MA_CHANNEL_SIDE_LEFT; + channelMap[4] = MA_CHANNEL_SIDE_RIGHT; + channelMap[5] = MA_CHANNEL_BACK_LEFT; + channelMap[6] = MA_CHANNEL_BACK_RIGHT; + channelMap[7] = MA_CHANNEL_LFE; + } break; + } - /* If the channel routing stage is at the front of the pipeline we read from the pre format converter. Otherwise we read from the sample rate converter. */ - if (pDSP->isChannelRoutingAtStart) { - return (ma_uint32)ma_format_converter_read_deinterleaved(&pDSP->formatConverterIn, frameCount, ppSamplesOut, pUserData); - } else { - if (pDSP->isSRCRequired) { - return (ma_uint32)ma_src_read_deinterleaved(&pDSP->src, frameCount, ppSamplesOut, pUserData); - } else { - return (ma_uint32)ma_format_converter_read_deinterleaved(&pDSP->formatConverterIn, frameCount, ppSamplesOut, pUserData); + /* Remainder. 
*/ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); } } } -ma_result ma_pcm_converter_init(const ma_pcm_converter_config* pConfig, ma_pcm_converter* pDSP) +static void ma_get_standard_channel_map_sound4(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_result result; - - if (pDSP == NULL) { - return MA_INVALID_ARGS; - } - - ma_zero_object(pDSP); - pDSP->onRead = pConfig->onRead; - pDSP->pUserData = pConfig->pUserData; - pDSP->isDynamicSampleRateAllowed = pConfig->allowDynamicSampleRate; - - /* - In general, this is the pipeline used for data conversion. Note that this can actually change which is explained later. - - Pre Format Conversion -> Sample Rate Conversion -> Channel Routing -> Post Format Conversion - - Pre Format Conversion - --------------------- - This is where the sample data is converted to a format that's usable by the later stages in the pipeline. Input data - is converted to deinterleaved floating-point. - - Channel Routing - --------------- - Channel routing is where stereo is converted to 5.1, mono is converted to stereo, etc. This stage depends on the - pre format conversion stage. - - Sample Rate Conversion - ---------------------- - Sample rate conversion depends on the pre format conversion stage and as the name implies performs sample rate conversion. - - Post Format Conversion - ---------------------- - This stage is where our deinterleaved floating-point data from the previous stages are converted to the requested output - format. - - - Optimizations - ------------- - Sometimes the conversion pipeline is rearranged for efficiency. The first obvious optimization is to eliminate unnecessary - stages in the pipeline. When no channel routing nor sample rate conversion is necessary, the entire pipeline is optimized - down to just this: - - Post Format Conversion - - When sample rate conversion is not unnecessary: - - Pre Format Conversion -> Channel Routing -> Post Format Conversion - - When channel routing is unnecessary: - - Pre Format Conversion -> Sample Rate Conversion -> Post Format Conversion - - A slightly less obvious optimization is used depending on whether or not we are increasing or decreasing the number of - channels. Because everything in the pipeline works on a per-channel basis, the efficiency of the pipeline is directly - proportionate to the number of channels that need to be processed. Therefore, it's can be more efficient to move the - channel conversion stage to an earlier or later stage. When the channel count is being reduced, we move the channel - conversion stage to the start of the pipeline so that later stages can work on a smaller number of channels at a time. - Otherwise, we move the channel conversion stage to the end of the pipeline. When reducing the channel count, the pipeline - will look like this: - - Pre Format Conversion -> Channel Routing -> Sample Rate Conversion -> Post Format Conversion - - Notice how the Channel Routing and Sample Rate Conversion stages are swapped so that the SRC stage has less data to process. - */ - - /* First we need to determine what's required and what's not. 
*/ - if (pConfig->sampleRateIn != pConfig->sampleRateOut || pConfig->allowDynamicSampleRate) { - pDSP->isSRCRequired = MA_TRUE; - } - if (pConfig->channelsIn != pConfig->channelsOut || !ma_channel_map_equal(pConfig->channelsIn, pConfig->channelMapIn, pConfig->channelMapOut)) { - pDSP->isChannelRoutingRequired = MA_TRUE; - } - - /* If neither a sample rate conversion nor channel conversion is necessary we can skip the pre format conversion. */ - if (!pDSP->isSRCRequired && !pDSP->isChannelRoutingRequired) { - /* We don't need a pre format conversion stage, but we may still need a post format conversion stage. */ - if (pConfig->formatIn != pConfig->formatOut) { - pDSP->isPostFormatConversionRequired = MA_TRUE; - } - } else { - pDSP->isPreFormatConversionRequired = MA_TRUE; - pDSP->isPostFormatConversionRequired = MA_TRUE; - } + switch (channels) + { + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; - /* Use a passthrough if none of the stages are being used. */ - if (!pDSP->isPreFormatConversionRequired && !pDSP->isPostFormatConversionRequired && !pDSP->isChannelRoutingRequired && !pDSP->isSRCRequired) { - pDSP->isPassthrough = MA_TRUE; - } + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; - /* Move the channel conversion stage to the start of the pipeline if we are reducing the channel count. */ - if (pConfig->channelsOut < pConfig->channelsIn) { - pDSP->isChannelRoutingAtStart = MA_TRUE; - } + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_CENTER; + } break; + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; - /* - We always initialize every stage of the pipeline regardless of whether or not the stage is used because it simplifies - a few things when it comes to dynamically changing properties post-initialization. - */ - result = MA_SUCCESS; + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } break; - /* Pre format conversion. */ - { - ma_format_converter_config preFormatConverterConfig = ma_format_converter_config_init( - pConfig->formatIn, - ma_format_f32, - pConfig->channelsIn, - ma_pcm_converter__pre_format_converter_on_read, - pDSP - ); - preFormatConverterConfig.ditherMode = pConfig->ditherMode; - preFormatConverterConfig.noSSE2 = pConfig->noSSE2; - preFormatConverterConfig.noAVX2 = pConfig->noAVX2; - preFormatConverterConfig.noAVX512 = pConfig->noAVX512; - preFormatConverterConfig.noNEON = pConfig->noNEON; - - result = ma_format_converter_init(&preFormatConverterConfig, &pDSP->formatConverterIn); - if (result != MA_SUCCESS) { - return result; - } - } + case 6: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + } break; - /* - Post format conversion. The exact configuration for this depends on whether or not we are reading data directly from the client - or from an earlier stage in the pipeline. 
- */ - { - ma_format_converter_config postFormatConverterConfig = ma_format_converter_config_init_new(); - postFormatConverterConfig.formatIn = pConfig->formatIn; - postFormatConverterConfig.formatOut = pConfig->formatOut; - postFormatConverterConfig.channels = pConfig->channelsOut; - postFormatConverterConfig.ditherMode = pConfig->ditherMode; - postFormatConverterConfig.noSSE2 = pConfig->noSSE2; - postFormatConverterConfig.noAVX2 = pConfig->noAVX2; - postFormatConverterConfig.noAVX512 = pConfig->noAVX512; - postFormatConverterConfig.noNEON = pConfig->noNEON; - if (pDSP->isPreFormatConversionRequired) { - postFormatConverterConfig.onReadDeinterleaved = ma_pcm_converter__post_format_converter_on_read_deinterleaved; - postFormatConverterConfig.formatIn = ma_format_f32; - } else { - postFormatConverterConfig.onRead = ma_pcm_converter__post_format_converter_on_read; - } + case 7: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_BACK_CENTER; + channelMap[6] = MA_CHANNEL_LFE; + } break; - result = ma_format_converter_init(&postFormatConverterConfig, &pDSP->formatConverterOut); - if (result != MA_SUCCESS) { - return result; - } + case 8: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + channelMap[6] = MA_CHANNEL_SIDE_LEFT; + channelMap[7] = MA_CHANNEL_SIDE_RIGHT; + } break; } - /* SRC */ - { - ma_src_config srcConfig = ma_src_config_init( - pConfig->sampleRateIn, - pConfig->sampleRateOut, - ((pConfig->channelsIn < pConfig->channelsOut) ? pConfig->channelsIn : pConfig->channelsOut), - ma_pcm_converter__src_on_read_deinterleaved, - pDSP - ); - srcConfig.algorithm = pConfig->srcAlgorithm; - srcConfig.neverConsumeEndOfInput = pConfig->neverConsumeEndOfInput; - srcConfig.noSSE2 = pConfig->noSSE2; - srcConfig.noAVX2 = pConfig->noAVX2; - srcConfig.noAVX512 = pConfig->noAVX512; - srcConfig.noNEON = pConfig->noNEON; - ma_copy_memory(&srcConfig.sinc, &pConfig->sinc, sizeof(pConfig->sinc)); - - result = ma_src_init(&srcConfig, &pDSP->src); - if (result != MA_SUCCESS) { - return result; + /* Remainder. 
*/ + if (channels > 8) { + ma_uint32 iChannel; + for (iChannel = 8; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-8)); } } +} - /* Channel conversion */ +static void ma_get_standard_channel_map_sndio(ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) +{ + switch (channels) { - ma_channel_router_config routerConfig = ma_channel_router_config_init( - pConfig->channelsIn, - pConfig->channelMapIn, - pConfig->channelsOut, - pConfig->channelMapOut, - pConfig->channelMixMode, - ma_pcm_converter__channel_router_on_read_deinterleaved, - pDSP); - routerConfig.noSSE2 = pConfig->noSSE2; - routerConfig.noAVX2 = pConfig->noAVX2; - routerConfig.noAVX512 = pConfig->noAVX512; - routerConfig.noNEON = pConfig->noNEON; - - result = ma_channel_router_init(&routerConfig, &pDSP->channelRouter); - if (result != MA_SUCCESS) { - return result; - } - } + case 1: + { + channelMap[0] = MA_CHANNEL_MONO; + } break; - return MA_SUCCESS; -} + case 2: + { + channelMap[0] = MA_CHANNEL_LEFT; + channelMap[1] = MA_CHANNEL_RIGHT; + } break; + case 3: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_FRONT_CENTER; + } break; -ma_result ma_pcm_converter_refresh_sample_rate(ma_pcm_converter* pDSP) -{ - /* The SRC stage will already have been initialized so we can just set it there. */ - ma_src_set_sample_rate(&pDSP->src, pDSP->src.config.sampleRateIn, pDSP->src.config.sampleRateOut); - return MA_SUCCESS; -} + case 4: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + } break; -ma_result ma_pcm_converter_set_input_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateIn) -{ - if (pDSP == NULL) { - return MA_INVALID_ARGS; - } + case 5: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + } break; - /* Must have a sample rate of > 0. */ - if (sampleRateIn == 0) { - return MA_INVALID_ARGS; + case 6: + default: + { + channelMap[0] = MA_CHANNEL_FRONT_LEFT; + channelMap[1] = MA_CHANNEL_FRONT_RIGHT; + channelMap[2] = MA_CHANNEL_BACK_LEFT; + channelMap[3] = MA_CHANNEL_BACK_RIGHT; + channelMap[4] = MA_CHANNEL_FRONT_CENTER; + channelMap[5] = MA_CHANNEL_LFE; + } break; } - /* Must have been initialized with allowDynamicSampleRate. */ - if (!pDSP->isDynamicSampleRateAllowed) { - return MA_INVALID_OPERATION; + /* Remainder. */ + if (channels > 6) { + ma_uint32 iChannel; + for (iChannel = 6; iChannel < MA_MAX_CHANNELS; ++iChannel) { + channelMap[iChannel] = (ma_channel)(MA_CHANNEL_AUX_0 + (iChannel-6)); + } } - - ma_atomic_exchange_32(&pDSP->src.config.sampleRateIn, sampleRateIn); - return ma_pcm_converter_refresh_sample_rate(pDSP); } -ma_result ma_pcm_converter_set_output_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateOut) +void ma_get_standard_channel_map(ma_standard_channel_map standardChannelMap, ma_uint32 channels, ma_channel channelMap[MA_MAX_CHANNELS]) { - if (pDSP == NULL) { - return MA_INVALID_ARGS; - } + switch (standardChannelMap) + { + case ma_standard_channel_map_alsa: + { + ma_get_standard_channel_map_alsa(channels, channelMap); + } break; - /* Must have a sample rate of > 0. 
*/ - if (sampleRateOut == 0) { - return MA_INVALID_ARGS; - } + case ma_standard_channel_map_rfc3551: + { + ma_get_standard_channel_map_rfc3551(channels, channelMap); + } break; - /* Must have been initialized with allowDynamicSampleRate. */ - if (!pDSP->isDynamicSampleRateAllowed) { - return MA_INVALID_OPERATION; - } + case ma_standard_channel_map_flac: + { + ma_get_standard_channel_map_flac(channels, channelMap); + } break; - ma_atomic_exchange_32(&pDSP->src.config.sampleRateOut, sampleRateOut); - return ma_pcm_converter_refresh_sample_rate(pDSP); -} + case ma_standard_channel_map_vorbis: + { + ma_get_standard_channel_map_vorbis(channels, channelMap); + } break; -ma_result ma_pcm_converter_set_sample_rate(ma_pcm_converter* pDSP, ma_uint32 sampleRateIn, ma_uint32 sampleRateOut) -{ - if (pDSP == NULL) { - return MA_INVALID_ARGS; - } + case ma_standard_channel_map_sound4: + { + ma_get_standard_channel_map_sound4(channels, channelMap); + } break; + + case ma_standard_channel_map_sndio: + { + ma_get_standard_channel_map_sndio(channels, channelMap); + } break; - /* Must have a sample rate of > 0. */ - if (sampleRateIn == 0 || sampleRateOut == 0) { - return MA_INVALID_ARGS; + case ma_standard_channel_map_microsoft: + default: + { + ma_get_standard_channel_map_microsoft(channels, channelMap); + } break; } +} - /* Must have been initialized with allowDynamicSampleRate. */ - if (!pDSP->isDynamicSampleRateAllowed) { - return MA_INVALID_OPERATION; +void ma_channel_map_copy(ma_channel* pOut, const ma_channel* pIn, ma_uint32 channels) +{ + if (pOut != NULL && pIn != NULL && channels > 0) { + MA_COPY_MEMORY(pOut, pIn, sizeof(*pOut) * channels); } - - ma_atomic_exchange_32(&pDSP->src.config.sampleRateIn, sampleRateIn); - ma_atomic_exchange_32(&pDSP->src.config.sampleRateOut, sampleRateOut); - - return ma_pcm_converter_refresh_sample_rate(pDSP); } -ma_uint64 ma_pcm_converter_read(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint64 frameCount) +ma_bool32 ma_channel_map_valid(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_pcm_converter_callback_data data; - - if (pDSP == NULL || pFramesOut == NULL) { - return 0; + if (channelMap == NULL) { + return MA_FALSE; } - /* Fast path. */ - if (pDSP->isPassthrough) { - if (frameCount <= 0xFFFFFFFF) { - return (ma_uint32)pDSP->onRead(pDSP, pFramesOut, (ma_uint32)frameCount, pDSP->pUserData); - } else { - ma_uint8* pNextFramesOut = (ma_uint8*)pFramesOut; - - ma_uint64 totalFramesRead = 0; - while (totalFramesRead < frameCount) { - ma_uint32 framesRead; - ma_uint64 framesRemaining = (frameCount - totalFramesRead); - ma_uint64 framesToReadRightNow = framesRemaining; - if (framesToReadRightNow > 0xFFFFFFFF) { - framesToReadRightNow = 0xFFFFFFFF; - } - - framesRead = pDSP->onRead(pDSP, pNextFramesOut, (ma_uint32)framesToReadRightNow, pDSP->pUserData); - if (framesRead == 0) { - break; - } + /* A channel count of 0 is invalid. */ + if (channels == 0) { + return MA_FALSE; + } - pNextFramesOut += framesRead * pDSP->channelRouter.config.channelsOut * ma_get_bytes_per_sample(pDSP->formatConverterOut.config.formatOut); - totalFramesRead += framesRead; + /* It does not make sense to have a mono channel when there is more than 1 channel. */ + if (channels > 1) { + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMap[iChannel] == MA_CHANNEL_MONO) { + return MA_FALSE; } - - return totalFramesRead; } } - /* Slower path. The real work is done here. 
To do this all we need to do is read from the last stage in the pipeline. */ - ma_assert(pDSP->isPostFormatConversionRequired == MA_TRUE); - - data.pDSP = pDSP; - data.pUserDataForClient = pDSP->pUserData; - return ma_format_converter_read(&pDSP->formatConverterOut, frameCount, pFramesOut, &data); + return MA_TRUE; } - -typedef struct -{ - const void* pDataIn; - ma_format formatIn; - ma_uint32 channelsIn; - ma_uint64 totalFrameCount; - ma_uint64 iNextFrame; - ma_bool32 isFeedingZeros; /* When set to true, feeds the DSP zero samples. */ -} ma_convert_frames__data; - -ma_uint32 ma_convert_frames__on_read(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +ma_bool32 ma_channel_map_equal(ma_uint32 channels, const ma_channel channelMapA[MA_MAX_CHANNELS], const ma_channel channelMapB[MA_MAX_CHANNELS]) { - ma_convert_frames__data* pData; - ma_uint32 framesToRead; - ma_uint64 framesRemaining; - ma_uint32 frameSizeInBytes; - - (void)pDSP; - - pData = (ma_convert_frames__data*)pUserData; - ma_assert(pData != NULL); - ma_assert(pData->totalFrameCount >= pData->iNextFrame); + ma_uint32 iChannel; - framesToRead = frameCount; - framesRemaining = (pData->totalFrameCount - pData->iNextFrame); - if (framesToRead > framesRemaining) { - framesToRead = (ma_uint32)framesRemaining; + if (channelMapA == channelMapB) { + return MA_FALSE; } - frameSizeInBytes = ma_get_bytes_per_frame(pData->formatIn, pData->channelsIn); + if (channels == 0 || channels > MA_MAX_CHANNELS) { + return MA_FALSE; + } - if (!pData->isFeedingZeros) { - ma_copy_memory(pFramesOut, (const ma_uint8*)pData->pDataIn + (frameSizeInBytes * pData->iNextFrame), frameSizeInBytes * framesToRead); - } else { - ma_zero_memory(pFramesOut, frameSizeInBytes * framesToRead); + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMapA[iChannel] != channelMapB[iChannel]) { + return MA_FALSE; + } } - pData->iNextFrame += framesToRead; - return framesToRead; + return MA_TRUE; } -ma_pcm_converter_config ma_pcm_converter_config_init_new() +ma_bool32 ma_channel_map_blank(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS]) { - ma_pcm_converter_config config; - ma_zero_object(&config); + ma_uint32 iChannel; - return config; -} + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMap[iChannel] != MA_CHANNEL_NONE) { + return MA_FALSE; + } + } -ma_pcm_converter_config ma_pcm_converter_config_init(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_pcm_converter_read_proc onRead, void* pUserData) -{ - return ma_pcm_converter_config_init_ex(formatIn, channelsIn, sampleRateIn, NULL, formatOut, channelsOut, sampleRateOut, NULL, onRead, pUserData); + return MA_TRUE; } -ma_pcm_converter_config ma_pcm_converter_config_init_ex(ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], ma_pcm_converter_read_proc onRead, void* pUserData) +ma_bool32 ma_channel_map_contains_channel_position(ma_uint32 channels, const ma_channel channelMap[MA_MAX_CHANNELS], ma_channel channelPosition) { - ma_pcm_converter_config config; - ma_zero_object(&config); - config.formatIn = formatIn; - config.channelsIn = channelsIn; - config.sampleRateIn = sampleRateIn; - config.formatOut = formatOut; - config.channelsOut = channelsOut; - config.sampleRateOut = sampleRateOut; - if 
(channelMapIn != NULL) { - ma_copy_memory(config.channelMapIn, channelMapIn, sizeof(config.channelMapIn)); - } - if (channelMapOut != NULL) { - ma_copy_memory(config.channelMapOut, channelMapOut, sizeof(config.channelMapOut)); + ma_uint32 iChannel; + for (iChannel = 0; iChannel < channels; ++iChannel) { + if (channelMap[iChannel] == channelPosition) { + return MA_TRUE; + } } - config.onRead = onRead; - config.pUserData = pUserData; - return config; + return MA_FALSE; } -ma_uint64 ma_convert_frames(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_uint64 frameCount) -{ - ma_channel channelMapOut[MA_MAX_CHANNELS]; - ma_channel channelMapIn[MA_MAX_CHANNELS]; - - ma_get_standard_channel_map(ma_standard_channel_map_default, channelsOut, channelMapOut); - ma_get_standard_channel_map(ma_standard_channel_map_default, channelsIn, channelMapIn); +/************************************************************************************************************************************************************** - return ma_convert_frames_ex(pOut, formatOut, channelsOut, sampleRateOut, channelMapOut, pIn, formatIn, channelsIn, sampleRateIn, channelMapIn, frameCount); -} +Conversion Helpers -ma_uint64 ma_convert_frames_ex(void* pOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, ma_channel channelMapOut[MA_MAX_CHANNELS], const void* pIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn, ma_channel channelMapIn[MA_MAX_CHANNELS], ma_uint64 frameCount) +**************************************************************************************************************************************************************/ +ma_uint64 ma_convert_frames(void* pOut, ma_uint64 frameCountOut, ma_format formatOut, ma_uint32 channelsOut, ma_uint32 sampleRateOut, const void* pIn, ma_uint64 frameCountIn, ma_format formatIn, ma_uint32 channelsIn, ma_uint32 sampleRateIn) { - ma_uint64 frameCountOut; - ma_convert_frames__data data; - ma_pcm_converter_config converterConfig; - ma_pcm_converter converter; - ma_uint64 totalFramesRead; - - if (frameCount == 0) { - return 0; - } - - frameCountOut = ma_calculate_frame_count_after_src(sampleRateOut, sampleRateIn, frameCount); - if (pOut == NULL) { - return frameCountOut; - } - - data.pDataIn = pIn; - data.formatIn = formatIn; - data.channelsIn = channelsIn; - data.totalFrameCount = frameCount; - data.iNextFrame = 0; - data.isFeedingZeros = MA_FALSE; - - ma_zero_object(&converterConfig); - - converterConfig.formatIn = formatIn; - converterConfig.channelsIn = channelsIn; - converterConfig.sampleRateIn = sampleRateIn; - if (channelMapIn != NULL) { - ma_channel_map_copy(converterConfig.channelMapIn, channelMapIn, channelsIn); - } else { - ma_get_standard_channel_map(ma_standard_channel_map_default, converterConfig.channelsIn, converterConfig.channelMapIn); - } - - converterConfig.formatOut = formatOut; - converterConfig.channelsOut = channelsOut; - converterConfig.sampleRateOut = sampleRateOut; - if (channelMapOut != NULL) { - ma_channel_map_copy(converterConfig.channelMapOut, channelMapOut, channelsOut); - } else { - ma_get_standard_channel_map(ma_standard_channel_map_default, converterConfig.channelsOut, converterConfig.channelMapOut); - } - - converterConfig.onRead = ma_convert_frames__on_read; - converterConfig.pUserData = &data; - - if (ma_pcm_converter_init(&converterConfig, &converter) != MA_SUCCESS) { - return 0; - } + 
ma_data_converter_config config; - /* - Always output our computed frame count. There is a chance the sample rate conversion routine may not output the last sample - due to precision issues with 32-bit floats, in which case we should feed the DSP zero samples so it can generate that last - frame. - */ - totalFramesRead = ma_pcm_converter_read(&converter, pOut, frameCountOut); - if (totalFramesRead < frameCountOut) { - ma_uint32 bpfOut = ma_get_bytes_per_frame(formatOut, channelsOut); + config = ma_data_converter_config_init(formatIn, formatOut, channelsIn, channelsOut, sampleRateIn, sampleRateOut); + ma_get_standard_channel_map(ma_standard_channel_map_default, channelsOut, config.channelMapOut); + ma_get_standard_channel_map(ma_standard_channel_map_default, channelsIn, config.channelMapIn); - data.isFeedingZeros = MA_TRUE; - data.totalFrameCount = ((ma_uint64)0xFFFFFFFF << 32) | 0xFFFFFFFF; /* C89 does not support 64-bit constants so need to instead construct it like this. Annoying... */ /*data.totalFrameCount = 0xFFFFFFFFFFFFFFFF;*/ - data.pDataIn = NULL; + /* For this we can default to the best resampling available since it's most likely going to be called in non time critical situations. */ + config.resampling.linear.lpfCount = MA_MAX_RESAMPLER_LPF_FILTERS; - while (totalFramesRead < frameCountOut) { - ma_uint64 framesToRead; - ma_uint64 framesJustRead; + return ma_convert_frames_ex(pOut, frameCountOut, pIn, frameCountIn, &config); +} - framesToRead = (frameCountOut - totalFramesRead); - ma_assert(framesToRead > 0); +ma_uint64 ma_convert_frames_ex(void* pOut, ma_uint64 frameCountOut, const void* pIn, ma_uint64 frameCountIn, const ma_data_converter_config* pConfig) +{ + ma_result result; + ma_data_converter converter; - framesJustRead = ma_pcm_converter_read(&converter, ma_offset_ptr(pOut, totalFramesRead * bpfOut), framesToRead); - totalFramesRead += framesJustRead; + if (frameCountIn == 0 || pConfig == NULL) { + return 0; + } - if (framesJustRead < framesToRead) { - break; - } - } + result = ma_data_converter_init(pConfig, &converter); + if (result != MA_SUCCESS) { + return 0; /* Failed to initialize the data converter. */ + } - /* At this point we should have output every sample, but just to be super duper sure, just fill the rest with zeros. 
*/ - if (totalFramesRead < frameCountOut) { - ma_zero_memory_64(ma_offset_ptr(pOut, totalFramesRead * bpfOut), ((frameCountOut - totalFramesRead) * bpfOut)); - totalFramesRead = frameCountOut; + if (pOut == NULL) { + frameCountOut = ma_data_converter_get_expected_output_frame_count(&converter, frameCountIn); + } else { + result = ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut); + if (result != MA_SUCCESS) { + frameCountOut = 0; } } - ma_assert(totalFramesRead == frameCountOut); - return totalFramesRead; + ma_data_converter_uninit(&converter); + return frameCountOut; } @@ -32714,13 +33672,13 @@ MA_INLINE ma_uint32 ma_rb__extract_offset_loop_flag(ma_uint32 encodedOffset) MA_INLINE void* ma_rb__get_read_ptr(ma_rb* pRB) { - ma_assert(pRB != NULL); + MA_ASSERT(pRB != NULL); return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedReadOffset)); } MA_INLINE void* ma_rb__get_write_ptr(ma_rb* pRB) { - ma_assert(pRB != NULL); + MA_ASSERT(pRB != NULL); return ma_offset_ptr(pRB->pBuffer, ma_rb__extract_offset_in_bytes(pRB->encodedWriteOffset)); } @@ -32731,16 +33689,17 @@ MA_INLINE ma_uint32 ma_rb__construct_offset(ma_uint32 offsetInBytes, ma_uint32 o MA_INLINE void ma_rb__deconstruct_offset(ma_uint32 encodedOffset, ma_uint32* pOffsetInBytes, ma_uint32* pOffsetLoopFlag) { - ma_assert(pOffsetInBytes != NULL); - ma_assert(pOffsetLoopFlag != NULL); + MA_ASSERT(pOffsetInBytes != NULL); + MA_ASSERT(pOffsetLoopFlag != NULL); *pOffsetInBytes = ma_rb__extract_offset_in_bytes(encodedOffset); *pOffsetLoopFlag = ma_rb__extract_offset_loop_flag(encodedOffset); } -ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB) +ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size_t subbufferStrideInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB) { + ma_result result; const ma_uint32 maxSubBufferSize = 0x7FFFFFFF - (MA_SIMD_ALIGNMENT-1); if (pRB == NULL) { @@ -32756,7 +33715,13 @@ ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size } - ma_zero_object(pRB); + MA_ZERO_OBJECT(pRB); + + result = ma_allocation_callbacks_init_copy(&pRB->allocationCallbacks, pAllocationCallbacks); + if (result != MA_SUCCESS) { + return result; + } + pRB->subbufferSizeInBytes = (ma_uint32)subbufferSizeInBytes; pRB->subbufferCount = (ma_uint32)subbufferCount; @@ -32773,21 +33738,21 @@ ma_result ma_rb_init_ex(size_t subbufferSizeInBytes, size_t subbufferCount, size pRB->subbufferStrideInBytes = (pRB->subbufferSizeInBytes + (MA_SIMD_ALIGNMENT-1)) & ~MA_SIMD_ALIGNMENT; bufferSizeInBytes = (size_t)pRB->subbufferCount*pRB->subbufferStrideInBytes; - pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT); + pRB->pBuffer = ma_aligned_malloc(bufferSizeInBytes, MA_SIMD_ALIGNMENT, &pRB->allocationCallbacks); if (pRB->pBuffer == NULL) { return MA_OUT_OF_MEMORY; } - ma_zero_memory(pRB->pBuffer, bufferSizeInBytes); + MA_ZERO_MEMORY(pRB->pBuffer, bufferSizeInBytes); pRB->ownsBuffer = MA_TRUE; } return MA_SUCCESS; } -ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, ma_rb* pRB) +ma_result ma_rb_init(size_t bufferSizeInBytes, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_rb* pRB) { - return ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pRB); + return 
ma_rb_init_ex(bufferSizeInBytes, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); } void ma_rb_uninit(ma_rb* pRB) @@ -32797,7 +33762,7 @@ void ma_rb_uninit(ma_rb* pRB) } if (pRB->ownsBuffer) { - ma_aligned_free(pRB->pBuffer); + ma_aligned_free(pRB->pBuffer, &pRB->allocationCallbacks); } } @@ -32934,7 +33899,7 @@ ma_result ma_rb_acquire_write(ma_rb* pRB, size_t* pSizeInBytes, void** ppBufferO /* Clear the buffer if desired. */ if (pRB->clearOnWriteAcquire) { - ma_zero_memory(*ppBufferOut, *pSizeInBytes); + MA_ZERO_MEMORY(*ppBufferOut, *pSizeInBytes); } return MA_SUCCESS; @@ -33161,12 +34126,12 @@ void* ma_rb_get_subbuffer_ptr(ma_rb* pRB, size_t subbufferIndex, void* pBuffer) static MA_INLINE ma_uint32 ma_pcm_rb_get_bpf(ma_pcm_rb* pRB) { - ma_assert(pRB != NULL); + MA_ASSERT(pRB != NULL); return ma_get_bytes_per_frame(pRB->format, pRB->channels); } -ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB) +ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subbufferSizeInFrames, ma_uint32 subbufferCount, ma_uint32 subbufferStrideInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) { ma_uint32 bpf; ma_result result; @@ -33175,14 +34140,14 @@ ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subb return MA_INVALID_ARGS; } - ma_zero_object(pRB); + MA_ZERO_OBJECT(pRB); bpf = ma_get_bytes_per_frame(format, channels); if (bpf == 0) { return MA_INVALID_ARGS; } - result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, &pRB->rb); + result = ma_rb_init_ex(subbufferSizeInFrames*bpf, subbufferCount, subbufferStrideInFrames*bpf, pOptionalPreallocatedBuffer, pAllocationCallbacks, &pRB->rb); if (result != MA_SUCCESS) { return result; } @@ -33193,9 +34158,9 @@ ma_result ma_pcm_rb_init_ex(ma_format format, ma_uint32 channels, ma_uint32 subb return MA_SUCCESS; } -ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, ma_pcm_rb* pRB) +ma_result ma_pcm_rb_init(ma_format format, ma_uint32 channels, ma_uint32 bufferSizeInFrames, void* pOptionalPreallocatedBuffer, const ma_allocation_callbacks* pAllocationCallbacks, ma_pcm_rb* pRB) { - return ma_pcm_rb_init_ex(format, channels, bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pRB); + return ma_pcm_rb_init_ex(format, channels, bufferSizeInFrames, 1, 0, pOptionalPreallocatedBuffer, pAllocationCallbacks, pRB); } void ma_pcm_rb_uninit(ma_pcm_rb* pRB) @@ -33362,22 +34327,38 @@ void* ma_pcm_rb_get_subbuffer_ptr(ma_pcm_rb* pRB, ma_uint32 subbufferIndex, void Miscellaneous Helpers **************************************************************************************************************************************************************/ -void* ma_malloc(size_t sz) +void* ma_malloc(size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) { - return MA_MALLOC(sz); + if (pAllocationCallbacks != NULL) { + return ma__malloc_from_callbacks(sz, pAllocationCallbacks); + } else { + return ma__malloc_default(sz, NULL); + } } -void* ma_realloc(void* p, size_t sz) +void* ma_realloc(void* p, size_t sz, const ma_allocation_callbacks* pAllocationCallbacks) { - return MA_REALLOC(p, sz); + if (pAllocationCallbacks != NULL) { + if (pAllocationCallbacks->onRealloc 
!= NULL) { + return pAllocationCallbacks->onRealloc(p, sz, pAllocationCallbacks->pUserData); + } else { + return NULL; /* This requires a native implementation of realloc(). */ + } + } else { + return ma__realloc_default(p, sz, NULL); + } } -void ma_free(void* p) +void ma_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) { - MA_FREE(p); + if (pAllocationCallbacks != NULL) { + ma__free_from_callbacks(p, pAllocationCallbacks); + } else { + ma__free_default(p, NULL); + } } -void* ma_aligned_malloc(size_t sz, size_t alignment) +void* ma_aligned_malloc(size_t sz, size_t alignment, const ma_allocation_callbacks* pAllocationCallbacks) { size_t extraBytes; void* pUnaligned; @@ -33389,7 +34370,7 @@ void* ma_aligned_malloc(size_t sz, size_t alignment) extraBytes = alignment-1 + sizeof(void*); - pUnaligned = ma_malloc(sz + extraBytes); + pUnaligned = ma_malloc(sz + extraBytes, pAllocationCallbacks); if (pUnaligned == NULL) { return NULL; } @@ -33400,9 +34381,9 @@ void* ma_aligned_malloc(size_t sz, size_t alignment) return pAligned; } -void ma_aligned_free(void* p) +void ma_aligned_free(void* p, const ma_allocation_callbacks* pAllocationCallbacks) { - ma_free(((void**)p)[-1]); + ma_free(((void**)p)[-1], pAllocationCallbacks); } const char* ma_get_format_name(ma_format format) @@ -33449,12 +34430,12 @@ Decoding **************************************************************************************************************************************************************/ #ifndef MA_NO_DECODING -size_t ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) +static size_t ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) { size_t bytesRead; - ma_assert(pDecoder != NULL); - ma_assert(pBufferOut != NULL); + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pBufferOut != NULL); bytesRead = pDecoder->onRead(pDecoder, pBufferOut, bytesToRead); pDecoder->readPointer += bytesRead; @@ -33462,11 +34443,11 @@ size_t ma_decoder_read_bytes(ma_decoder* pDecoder, void* pBufferOut, size_t byte return bytesRead; } -ma_bool32 ma_decoder_seek_bytes(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) +static ma_bool32 ma_decoder_seek_bytes(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) { ma_bool32 wasSuccessful; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); wasSuccessful = pDecoder->onSeek(pDecoder, byteOffset, origin); if (wasSuccessful) { @@ -33480,49 +34461,18 @@ ma_bool32 ma_decoder_seek_bytes(ma_decoder* pDecoder, int byteOffset, ma_seek_or return wasSuccessful; } -ma_bool32 ma_decoder_seek_bytes_64(ma_decoder* pDecoder, ma_uint64 byteOffset, ma_seek_origin origin) -{ - ma_assert(pDecoder != NULL); - - if (origin == ma_seek_origin_start) { - ma_uint64 bytesToSeekThisIteration = 0x7FFFFFFF; - if (bytesToSeekThisIteration > byteOffset) { - bytesToSeekThisIteration = byteOffset; - } - - if (!ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_start)) { - return MA_FALSE; - } - - byteOffset -= bytesToSeekThisIteration; - } - - /* Getting here means we need to seek relative to the current position. 
*/ - while (byteOffset > 0) { - ma_uint64 bytesToSeekThisIteration = 0x7FFFFFFF; - if (bytesToSeekThisIteration > byteOffset) { - bytesToSeekThisIteration = byteOffset; - } - - if (!ma_decoder_seek_bytes(pDecoder, (int)bytesToSeekThisIteration, ma_seek_origin_current)) { - return MA_FALSE; - } - - byteOffset -= bytesToSeekThisIteration; - } - - return MA_TRUE; -} - ma_decoder_config ma_decoder_config_init(ma_format outputFormat, ma_uint32 outputChannels, ma_uint32 outputSampleRate) { ma_decoder_config config; - ma_zero_object(&config); + MA_ZERO_OBJECT(&config); config.format = outputFormat; config.channels = outputChannels; config.sampleRate = outputSampleRate; ma_get_standard_channel_map(ma_standard_channel_map_default, config.channels, config.channelMap); + config.resampling.algorithm = ma_resample_algorithm_linear; + config.resampling.linear.lpfCount = 1; + config.resampling.speex.quality = 3; return config; } @@ -33533,17 +34483,17 @@ ma_decoder_config ma_decoder_config_init_copy(const ma_decoder_config* pConfig) if (pConfig != NULL) { config = *pConfig; } else { - ma_zero_object(&config); + MA_ZERO_OBJECT(&config); } return config; } -ma_result ma_decoder__init_dsp(ma_decoder* pDecoder, const ma_decoder_config* pConfig, ma_pcm_converter_read_proc onRead) +static ma_result ma_decoder__init_data_converter(ma_decoder* pDecoder, const ma_decoder_config* pConfig) { - ma_pcm_converter_config dspConfig; + ma_data_converter_config converterConfig; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); /* Output format. */ if (pConfig->format == ma_format_unknown) { @@ -33567,75 +34517,76 @@ ma_result ma_decoder__init_dsp(ma_decoder* pDecoder, const ma_decoder_config* pC if (ma_channel_map_blank(pDecoder->outputChannels, pConfig->channelMap)) { ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->outputChannels, pDecoder->outputChannelMap); } else { - ma_copy_memory(pDecoder->outputChannelMap, pConfig->channelMap, sizeof(pConfig->channelMap)); + MA_COPY_MEMORY(pDecoder->outputChannelMap, pConfig->channelMap, sizeof(pConfig->channelMap)); } + + converterConfig = ma_data_converter_config_init( + pDecoder->internalFormat, pDecoder->outputFormat, + pDecoder->internalChannels, pDecoder->outputChannels, + pDecoder->internalSampleRate, pDecoder->outputSampleRate + ); + ma_channel_map_copy(converterConfig.channelMapIn, pDecoder->internalChannelMap, pDecoder->internalChannels); + ma_channel_map_copy(converterConfig.channelMapOut, pDecoder->outputChannelMap, pDecoder->outputChannels); + converterConfig.channelMixMode = pConfig->channelMixMode; + converterConfig.ditherMode = pConfig->ditherMode; + converterConfig.resampling.allowDynamicSampleRate = MA_FALSE; /* Never allow dynamic sample rate conversion. Setting this to true will disable passthrough optimizations. */ + converterConfig.resampling.algorithm = pConfig->resampling.algorithm; + converterConfig.resampling.linear.lpfCount = pConfig->resampling.linear.lpfCount; + converterConfig.resampling.speex.quality = pConfig->resampling.speex.quality; - /* DSP. 
*/ - dspConfig = ma_pcm_converter_config_init_ex( - pDecoder->internalFormat, pDecoder->internalChannels, pDecoder->internalSampleRate, pDecoder->internalChannelMap, - pDecoder->outputFormat, pDecoder->outputChannels, pDecoder->outputSampleRate, pDecoder->outputChannelMap, - onRead, pDecoder); - dspConfig.channelMixMode = pConfig->channelMixMode; - dspConfig.ditherMode = pConfig->ditherMode; - dspConfig.srcAlgorithm = pConfig->srcAlgorithm; - dspConfig.sinc = pConfig->src.sinc; - - return ma_pcm_converter_init(&dspConfig, &pDecoder->dsp); + return ma_data_converter_init(&converterConfig, &pDecoder->converter); } /* WAV */ #ifdef dr_wav_h #define MA_HAS_WAV -size_t ma_decoder_internal_on_read__wav(void* pUserData, void* pBufferOut, size_t bytesToRead) +static size_t ma_decoder_internal_on_read__wav(void* pUserData, void* pBufferOut, size_t bytesToRead) { ma_decoder* pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead); } -drwav_bool32 ma_decoder_internal_on_seek__wav(void* pUserData, int offset, drwav_seek_origin origin) +static drwav_bool32 ma_decoder_internal_on_seek__wav(void* pUserData, int offset, drwav_seek_origin origin) { ma_decoder* pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); return ma_decoder_seek_bytes(pDecoder, offset, (origin == drwav_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); } -ma_uint32 ma_decoder_internal_on_read_pcm_frames__wav(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__wav(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { - ma_decoder* pDecoder; drwav* pWav; - (void)pDSP; - - pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); pWav = (drwav*)pDecoder->pInternalDecoder; - ma_assert(pWav != NULL); + MA_ASSERT(pWav != NULL); switch (pDecoder->internalFormat) { - case ma_format_s16: return (ma_uint32)drwav_read_pcm_frames_s16(pWav, frameCount, (drwav_int16*)pFramesOut); - case ma_format_s32: return (ma_uint32)drwav_read_pcm_frames_s32(pWav, frameCount, (drwav_int32*)pFramesOut); - case ma_format_f32: return (ma_uint32)drwav_read_pcm_frames_f32(pWav, frameCount, (float*)pFramesOut); + case ma_format_s16: return drwav_read_pcm_frames_s16(pWav, frameCount, (drwav_int16*)pFramesOut); + case ma_format_s32: return drwav_read_pcm_frames_s32(pWav, frameCount, (drwav_int32*)pFramesOut); + case ma_format_f32: return drwav_read_pcm_frames_f32(pWav, frameCount, (float*)pFramesOut); default: break; } /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. 
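ma_decoder__init_data_converter() above shows the general pattern for the ma_data_converter API that replaces ma_pcm_converter in this release: describe the input and output streams in a config, initialize, then push frames through it. A standalone sketch along those lines (the formats, channel counts and rates are arbitrary):

    static void convert_s16_44k_to_f32_48k(ma_int16* pIn, ma_uint64 inputFrameCount, float* pOut, ma_uint64 outputFrameCap)
    {
        ma_data_converter_config config = ma_data_converter_config_init(
            ma_format_s16, ma_format_f32,   /* formatIn, formatOut         */
            2, 2,                           /* channelsIn, channelsOut     */
            44100, 48000);                  /* sampleRateIn, sampleRateOut */
        ma_data_converter converter;
        ma_uint64 frameCountIn;
        ma_uint64 frameCountOut;

        if (ma_data_converter_init(&config, &converter) != MA_SUCCESS) {
            return;
        }

        frameCountIn  = inputFrameCount;  /* In: frames available in pIn. Out: frames actually consumed. */
        frameCountOut = outputFrameCap;   /* In: capacity of pOut.        Out: frames actually produced. */
        ma_data_converter_process_pcm_frames(&converter, pIn, &frameCountIn, pOut, &frameCountOut);

        ma_data_converter_uninit(&converter);
    }

Both frame counts are in/out parameters, which is exactly how ma_decoder_read_pcm_frames() drives the decoder's converter further down in this file.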
*/ - ma_assert(MA_FALSE); + MA_ASSERT(MA_FALSE); return 0; } -ma_result ma_decoder_internal_on_seek_to_pcm_frame__wav(ma_decoder* pDecoder, ma_uint64 frameIndex) +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__wav(ma_decoder* pDecoder, ma_uint64 frameIndex) { drwav* pWav; drwav_bool32 result; pWav = (drwav*)pDecoder->pInternalDecoder; - ma_assert(pWav != NULL); + MA_ASSERT(pWav != NULL); result = drwav_seek_to_pcm_frame(pWav, frameIndex); if (result) { @@ -33645,42 +34596,48 @@ ma_result ma_decoder_internal_on_seek_to_pcm_frame__wav(ma_decoder* pDecoder, ma } } -ma_result ma_decoder_internal_on_uninit__wav(ma_decoder* pDecoder) +static ma_result ma_decoder_internal_on_uninit__wav(ma_decoder* pDecoder) { drwav_uninit((drwav*)pDecoder->pInternalDecoder); - ma_free(pDecoder->pInternalDecoder); + ma__free_from_callbacks(pDecoder->pInternalDecoder, &pDecoder->allocationCallbacks); return MA_SUCCESS; } -ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__wav(ma_decoder* pDecoder) +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__wav(ma_decoder* pDecoder) { return ((drwav*)pDecoder->pInternalDecoder)->totalPCMFrameCount; } -ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) { drwav* pWav; - ma_result result; + drwav_allocation_callbacks allocationCallbacks; - ma_assert(pConfig != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); - pWav = (drwav*)ma_malloc(sizeof(*pWav)); + pWav = (drwav*)ma__malloc_from_callbacks(sizeof(*pWav), &pDecoder->allocationCallbacks); if (pWav == NULL) { return MA_OUT_OF_MEMORY; } + allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree; + /* Try opening the decoder first. */ - if (!drwav_init(pWav, ma_decoder_internal_on_read__wav, ma_decoder_internal_on_seek__wav, pDecoder, NULL)) { - ma_free(pWav); + if (!drwav_init(pWav, ma_decoder_internal_on_read__wav, ma_decoder_internal_on_seek__wav, pDecoder, &allocationCallbacks)) { + ma__free_from_callbacks(pWav, &pDecoder->allocationCallbacks); return MA_ERROR; } /* If we get here it means we successfully initialized the WAV decoder. We can now initialize the rest of the ma_decoder. */ - pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__wav; - pDecoder->onUninit = ma_decoder_internal_on_uninit__wav; + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__wav; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__wav; + pDecoder->onUninit = ma_decoder_internal_on_uninit__wav; pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__wav; - pDecoder->pInternalDecoder = pWav; + pDecoder->pInternalDecoder = pWav; /* Try to be as optimal as possible for the internal format. If miniaudio does not support a format we will fall back to f32. 
*/ pDecoder->internalFormat = ma_format_unknown; @@ -33720,69 +34677,59 @@ ma_result ma_decoder_init_wav__internal(const ma_decoder_config* pConfig, ma_dec pDecoder->internalSampleRate = pWav->sampleRate; ma_get_standard_channel_map(ma_standard_channel_map_microsoft, pDecoder->internalChannels, pDecoder->internalChannelMap); - result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__wav); - if (result != MA_SUCCESS) { - drwav_uninit(pWav); - ma_free(pWav); - return result; - } - return MA_SUCCESS; } -#endif +#endif /* dr_wav_h */ /* FLAC */ #ifdef dr_flac_h #define MA_HAS_FLAC -size_t ma_decoder_internal_on_read__flac(void* pUserData, void* pBufferOut, size_t bytesToRead) +static size_t ma_decoder_internal_on_read__flac(void* pUserData, void* pBufferOut, size_t bytesToRead) { ma_decoder* pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead); } -drflac_bool32 ma_decoder_internal_on_seek__flac(void* pUserData, int offset, drflac_seek_origin origin) +static drflac_bool32 ma_decoder_internal_on_seek__flac(void* pUserData, int offset, drflac_seek_origin origin) { ma_decoder* pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); return ma_decoder_seek_bytes(pDecoder, offset, (origin == drflac_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); } -ma_uint32 ma_decoder_internal_on_read_pcm_frames__flac(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__flac(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { - ma_decoder* pDecoder; drflac* pFlac; - (void)pDSP; - - pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); pFlac = (drflac*)pDecoder->pInternalDecoder; - ma_assert(pFlac != NULL); + MA_ASSERT(pFlac != NULL); switch (pDecoder->internalFormat) { - case ma_format_s16: return (ma_uint32)drflac_read_pcm_frames_s16(pFlac, frameCount, (drflac_int16*)pFramesOut); - case ma_format_s32: return (ma_uint32)drflac_read_pcm_frames_s32(pFlac, frameCount, (drflac_int32*)pFramesOut); - case ma_format_f32: return (ma_uint32)drflac_read_pcm_frames_f32(pFlac, frameCount, (float*)pFramesOut); + case ma_format_s16: return drflac_read_pcm_frames_s16(pFlac, frameCount, (drflac_int16*)pFramesOut); + case ma_format_s32: return drflac_read_pcm_frames_s32(pFlac, frameCount, (drflac_int32*)pFramesOut); + case ma_format_f32: return drflac_read_pcm_frames_f32(pFlac, frameCount, (float*)pFramesOut); default: break; } /* Should never get here. If we do, it means the internal format was not set correctly at initialization time. 
*/ - ma_assert(MA_FALSE); + MA_ASSERT(MA_FALSE); return 0; } -ma_result ma_decoder_internal_on_seek_to_pcm_frame__flac(ma_decoder* pDecoder, ma_uint64 frameIndex) +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__flac(ma_decoder* pDecoder, ma_uint64 frameIndex) { drflac* pFlac; drflac_bool32 result; pFlac = (drflac*)pDecoder->pInternalDecoder; - ma_assert(pFlac != NULL); + MA_ASSERT(pFlac != NULL); result = drflac_seek_to_pcm_frame(pFlac, frameIndex); if (result) { @@ -33792,36 +34739,42 @@ ma_result ma_decoder_internal_on_seek_to_pcm_frame__flac(ma_decoder* pDecoder, m } } -ma_result ma_decoder_internal_on_uninit__flac(ma_decoder* pDecoder) +static ma_result ma_decoder_internal_on_uninit__flac(ma_decoder* pDecoder) { drflac_close((drflac*)pDecoder->pInternalDecoder); return MA_SUCCESS; } -ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__flac(ma_decoder* pDecoder) +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__flac(ma_decoder* pDecoder) { return ((drflac*)pDecoder->pInternalDecoder)->totalPCMFrameCount; } -ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) { drflac* pFlac; - ma_result result; + drflac_allocation_callbacks allocationCallbacks; - ma_assert(pConfig != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); + + allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree; /* Try opening the decoder first. */ - pFlac = drflac_open(ma_decoder_internal_on_read__flac, ma_decoder_internal_on_seek__flac, pDecoder, NULL); + pFlac = drflac_open(ma_decoder_internal_on_read__flac, ma_decoder_internal_on_seek__flac, pDecoder, &allocationCallbacks); if (pFlac == NULL) { return MA_ERROR; } /* If we get here it means we successfully initialized the FLAC decoder. We can now initialize the rest of the ma_decoder. */ - pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__flac; - pDecoder->onUninit = ma_decoder_internal_on_uninit__flac; + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__flac; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__flac; + pDecoder->onUninit = ma_decoder_internal_on_uninit__flac; pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__flac; - pDecoder->pInternalDecoder = pFlac; + pDecoder->pInternalDecoder = pFlac; /* dr_flac supports reading as s32, s16 and f32. Try to do a one-to-one mapping if possible, but fall back to s32 if not. 
s32 is the "native" FLAC format @@ -33838,15 +34791,9 @@ ma_result ma_decoder_init_flac__internal(const ma_decoder_config* pConfig, ma_de pDecoder->internalSampleRate = pFlac->sampleRate; ma_get_standard_channel_map(ma_standard_channel_map_flac, pDecoder->internalChannels, pDecoder->internalChannelMap); - result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__flac); - if (result != MA_SUCCESS) { - drflac_close(pFlac); - return result; - } - return MA_SUCCESS; } -#endif +#endif /* dr_flac_h */ /* Vorbis */ #ifdef STB_VORBIS_INCLUDE_STB_VORBIS_H @@ -33866,13 +34813,13 @@ typedef struct float** ppPacketData; } ma_vorbis_decoder; -ma_uint32 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, void* pFramesOut, ma_uint32 frameCount) +static ma_uint64 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { float* pFramesOutF; - ma_uint32 totalFramesRead; + ma_uint64 totalFramesRead; - ma_assert(pVorbis != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pVorbis != NULL); + MA_ASSERT(pDecoder != NULL); pFramesOutF = (float*)pFramesOut; @@ -33886,17 +34833,17 @@ ma_uint32 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decod pFramesOutF += 1; } - pVorbis->framesConsumed += 1; + pVorbis->framesConsumed += 1; pVorbis->framesRemaining -= 1; - frameCount -= 1; - totalFramesRead += 1; + frameCount -= 1; + totalFramesRead += 1; } if (frameCount == 0) { break; } - ma_assert(pVorbis->framesRemaining == 0); + MA_ASSERT(pVorbis->framesRemaining == 0); /* We've run out of cached frames, so decode the next packet and continue iteration. */ do @@ -33926,10 +34873,11 @@ ma_uint32 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decod size_t bytesRead; if (pVorbis->dataCapacity == pVorbis->dataSize) { /* No room. Expand. */ + size_t oldCap = pVorbis->dataCapacity; size_t newCap = pVorbis->dataCapacity + MA_VORBIS_DATA_CHUNK_SIZE; ma_uint8* pNewData; - pNewData = (ma_uint8*)ma_realloc(pVorbis->pData, newCap); + pNewData = (ma_uint8*)ma__realloc_from_callbacks(pVorbis->pData, newCap, oldCap, &pDecoder->allocationCallbacks); if (pNewData == NULL) { return totalFramesRead; /* Out of memory. */ } @@ -33952,12 +34900,12 @@ ma_uint32 ma_vorbis_decoder_read_pcm_frames(ma_vorbis_decoder* pVorbis, ma_decod return totalFramesRead; } -ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, ma_uint64 frameIndex) +static ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_decoder* pDecoder, ma_uint64 frameIndex) { float buffer[4096]; - ma_assert(pVorbis != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pVorbis != NULL); + MA_ASSERT(pDecoder != NULL); /* This is terribly inefficient because stb_vorbis does not have a good seeking solution with it's push API. 
Currently this just performs @@ -33969,9 +34917,9 @@ ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_dec } stb_vorbis_flush_pushdata(pVorbis->pInternalVorbis); - pVorbis->framesConsumed = 0; + pVorbis->framesConsumed = 0; pVorbis->framesRemaining = 0; - pVorbis->dataSize = 0; + pVorbis->dataSize = 0; while (frameIndex > 0) { ma_uint32 framesRead; @@ -33980,7 +34928,7 @@ ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_dec framesToRead = (ma_uint32)frameIndex; } - framesRead = ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, buffer, framesToRead); + framesRead = (ma_uint32)ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, buffer, framesToRead); if (framesRead == 0) { return MA_ERROR; } @@ -33992,53 +34940,49 @@ ma_result ma_vorbis_decoder_seek_to_pcm_frame(ma_vorbis_decoder* pVorbis, ma_dec } -ma_result ma_decoder_internal_on_seek_to_pcm_frame__vorbis(ma_decoder* pDecoder, ma_uint64 frameIndex) +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__vorbis(ma_decoder* pDecoder, ma_uint64 frameIndex) { ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder; - ma_assert(pVorbis != NULL); + MA_ASSERT(pVorbis != NULL); return ma_vorbis_decoder_seek_to_pcm_frame(pVorbis, pDecoder, frameIndex); } -ma_result ma_decoder_internal_on_uninit__vorbis(ma_decoder* pDecoder) +static ma_result ma_decoder_internal_on_uninit__vorbis(ma_decoder* pDecoder) { ma_vorbis_decoder* pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder; - ma_assert(pVorbis != NULL); + MA_ASSERT(pVorbis != NULL); stb_vorbis_close(pVorbis->pInternalVorbis); - ma_free(pVorbis->pData); - ma_free(pVorbis); + ma__free_from_callbacks(pVorbis->pData, &pDecoder->allocationCallbacks); + ma__free_from_callbacks(pVorbis, &pDecoder->allocationCallbacks); return MA_SUCCESS; } -ma_uint32 ma_decoder_internal_on_read_pcm_frames__vorbis(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__vorbis(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { - ma_decoder* pDecoder; ma_vorbis_decoder* pVorbis; - (void)pDSP; - - pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); - ma_assert(pDecoder->internalFormat == ma_format_f32); + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pDecoder->internalFormat == ma_format_f32); pVorbis = (ma_vorbis_decoder*)pDecoder->pInternalDecoder; - ma_assert(pVorbis != NULL); + MA_ASSERT(pVorbis != NULL); return ma_vorbis_decoder_read_pcm_frames(pVorbis, pDecoder, pFramesOut, frameCount); } -ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__vorbis(ma_decoder* pDecoder) +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__vorbis(ma_decoder* pDecoder) { /* No good way to do this with Vorbis. */ (void)pDecoder; return 0; } -ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) { - ma_result result; stb_vorbis* pInternalVorbis = NULL; size_t dataSize = 0; size_t dataCapacity = 0; @@ -34047,8 +34991,8 @@ ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_ size_t vorbisDataSize; ma_vorbis_decoder* pVorbis; - ma_assert(pConfig != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); /* We grow the buffer in chunks. 
*/ do @@ -34058,11 +35002,12 @@ ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_ size_t bytesRead; int vorbisError = 0; int consumedDataSize = 0; + size_t oldCapacity = dataCapacity; dataCapacity += MA_VORBIS_DATA_CHUNK_SIZE; - pNewData = (ma_uint8*)ma_realloc(pData, dataCapacity); + pNewData = (ma_uint8*)ma__realloc_from_callbacks(pData, dataCapacity, oldCapacity, &pDecoder->allocationCallbacks); if (pNewData == NULL) { - ma_free(pData); + ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks); return MA_OUT_OF_MEMORY; } @@ -34109,91 +35054,81 @@ ma_result ma_decoder_init_vorbis__internal(const ma_decoder_config* pConfig, ma_ /* Don't allow more than MA_MAX_CHANNELS channels. */ if (vorbisInfo.channels > MA_MAX_CHANNELS) { stb_vorbis_close(pInternalVorbis); - ma_free(pData); + ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks); return MA_ERROR; /* Too many channels. */ } vorbisDataSize = sizeof(ma_vorbis_decoder) + sizeof(float)*vorbisInfo.max_frame_size; - pVorbis = (ma_vorbis_decoder*)ma_malloc(vorbisDataSize); + pVorbis = (ma_vorbis_decoder*)ma__malloc_from_callbacks(vorbisDataSize, &pDecoder->allocationCallbacks); if (pVorbis == NULL) { stb_vorbis_close(pInternalVorbis); - ma_free(pData); + ma__free_from_callbacks(pData, &pDecoder->allocationCallbacks); return MA_OUT_OF_MEMORY; } - ma_zero_memory(pVorbis, vorbisDataSize); + MA_ZERO_MEMORY(pVorbis, vorbisDataSize); pVorbis->pInternalVorbis = pInternalVorbis; pVorbis->pData = pData; pVorbis->dataSize = dataSize; pVorbis->dataCapacity = dataCapacity; - pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__vorbis; - pDecoder->onUninit = ma_decoder_internal_on_uninit__vorbis; + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__vorbis; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__vorbis; + pDecoder->onUninit = ma_decoder_internal_on_uninit__vorbis; pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__vorbis; - pDecoder->pInternalDecoder = pVorbis; + pDecoder->pInternalDecoder = pVorbis; /* The internal format is always f32. 
*/ - pDecoder->internalFormat = ma_format_f32; - pDecoder->internalChannels = vorbisInfo.channels; + pDecoder->internalFormat = ma_format_f32; + pDecoder->internalChannels = vorbisInfo.channels; pDecoder->internalSampleRate = vorbisInfo.sample_rate; ma_get_standard_channel_map(ma_standard_channel_map_vorbis, pDecoder->internalChannels, pDecoder->internalChannelMap); - result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__vorbis); - if (result != MA_SUCCESS) { - stb_vorbis_close(pVorbis->pInternalVorbis); - ma_free(pVorbis->pData); - ma_free(pVorbis); - return result; - } - return MA_SUCCESS; } -#endif +#endif /* STB_VORBIS_INCLUDE_STB_VORBIS_H */ /* MP3 */ #ifdef dr_mp3_h #define MA_HAS_MP3 -size_t ma_decoder_internal_on_read__mp3(void* pUserData, void* pBufferOut, size_t bytesToRead) +static size_t ma_decoder_internal_on_read__mp3(void* pUserData, void* pBufferOut, size_t bytesToRead) { ma_decoder* pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); return ma_decoder_read_bytes(pDecoder, pBufferOut, bytesToRead); } -drmp3_bool32 ma_decoder_internal_on_seek__mp3(void* pUserData, int offset, drmp3_seek_origin origin) +static drmp3_bool32 ma_decoder_internal_on_seek__mp3(void* pUserData, int offset, drmp3_seek_origin origin) { ma_decoder* pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); return ma_decoder_seek_bytes(pDecoder, offset, (origin == drmp3_seek_origin_start) ? ma_seek_origin_start : ma_seek_origin_current); } -ma_uint32 ma_decoder_internal_on_read_pcm_frames__mp3(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__mp3(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { - ma_decoder* pDecoder; drmp3* pMP3; - (void)pDSP; - - pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); - ma_assert(pDecoder->internalFormat == ma_format_f32); + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); + MA_ASSERT(pDecoder->internalFormat == ma_format_f32); pMP3 = (drmp3*)pDecoder->pInternalDecoder; - ma_assert(pMP3 != NULL); + MA_ASSERT(pMP3 != NULL); - return (ma_uint32)drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pFramesOut); + return drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pFramesOut); } -ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma_uint64 frameIndex) +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma_uint64 frameIndex) { drmp3* pMP3; drmp3_bool32 result; pMP3 = (drmp3*)pDecoder->pInternalDecoder; - ma_assert(pMP3 != NULL); + MA_ASSERT(pMP3 != NULL); result = drmp3_seek_to_pcm_frame(pMP3, frameIndex); if (result) { @@ -34203,32 +35138,37 @@ ma_result ma_decoder_internal_on_seek_to_pcm_frame__mp3(ma_decoder* pDecoder, ma } } -ma_result ma_decoder_internal_on_uninit__mp3(ma_decoder* pDecoder) +static ma_result ma_decoder_internal_on_uninit__mp3(ma_decoder* pDecoder) { drmp3_uninit((drmp3*)pDecoder->pInternalDecoder); - ma_free(pDecoder->pInternalDecoder); + ma__free_from_callbacks(pDecoder->pInternalDecoder, &pDecoder->allocationCallbacks); return MA_SUCCESS; } -ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__mp3(ma_decoder* pDecoder) +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__mp3(ma_decoder* pDecoder) { return drmp3_get_pcm_frame_count((drmp3*)pDecoder->pInternalDecoder); } -ma_result ma_decoder_init_mp3__internal(const 
ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_decoder* pDecoder) { drmp3* pMP3; drmp3_config mp3Config; - ma_result result; + drmp3_allocation_callbacks allocationCallbacks; - ma_assert(pConfig != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); - pMP3 = (drmp3*)ma_malloc(sizeof(*pMP3)); + pMP3 = (drmp3*)ma__malloc_from_callbacks(sizeof(*pMP3), &pDecoder->allocationCallbacks); if (pMP3 == NULL) { return MA_OUT_OF_MEMORY; } + allocationCallbacks.pUserData = pDecoder->allocationCallbacks.pUserData; + allocationCallbacks.onMalloc = pDecoder->allocationCallbacks.onMalloc; + allocationCallbacks.onRealloc = pDecoder->allocationCallbacks.onRealloc; + allocationCallbacks.onFree = pDecoder->allocationCallbacks.onFree; + /* Try opening the decoder first. MP3 can have variable sample rates (it's per frame/packet). We therefore need to use some smarts to determine the most appropriate internal sample rate. These are the rules we're going @@ -34240,59 +35180,74 @@ ma_result ma_decoder_init_mp3__internal(const ma_decoder_config* pConfig, ma_dec The internal channel count is always stereo, and the internal format is always f32. */ - ma_zero_object(&mp3Config); + MA_ZERO_OBJECT(&mp3Config); mp3Config.outputChannels = 2; mp3Config.outputSampleRate = (pConfig->sampleRate != 0) ? pConfig->sampleRate : 44100; - if (!drmp3_init(pMP3, ma_decoder_internal_on_read__mp3, ma_decoder_internal_on_seek__mp3, pDecoder, &mp3Config, NULL)) { - ma_free(pMP3); + if (!drmp3_init(pMP3, ma_decoder_internal_on_read__mp3, ma_decoder_internal_on_seek__mp3, pDecoder, &mp3Config, &allocationCallbacks)) { + ma__free_from_callbacks(pMP3, &pDecoder->allocationCallbacks); return MA_ERROR; } /* If we get here it means we successfully initialized the MP3 decoder. We can now initialize the rest of the ma_decoder. */ - pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__mp3; - pDecoder->onUninit = ma_decoder_internal_on_uninit__mp3; + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__mp3; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__mp3; + pDecoder->onUninit = ma_decoder_internal_on_uninit__mp3; pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__mp3; - pDecoder->pInternalDecoder = pMP3; + pDecoder->pInternalDecoder = pMP3; /* Internal format. 
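Because the rule above takes the config's sample rate when it is non-zero and falls back to 44100 otherwise, a caller that needs a fixed output rate for MP3 only has to set it in the decoder config. An illustrative sketch using the in-memory initializer; pMp3Data/mp3DataSize stand for whatever MP3 file image the caller already has:

    static ma_result open_mp3_at_48khz(const void* pMp3Data, size_t mp3DataSize, ma_decoder* pDecoder)
    {
        ma_decoder_config config = ma_decoder_config_init(ma_format_f32, 2, 48000);  /* Output pinned to 48000Hz. */
        return ma_decoder_init_memory_mp3(pMp3Data, mp3DataSize, &config, pDecoder);
    }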
*/ - pDecoder->internalFormat = ma_format_f32; - pDecoder->internalChannels = pMP3->channels; + pDecoder->internalFormat = ma_format_f32; + pDecoder->internalChannels = pMP3->channels; pDecoder->internalSampleRate = pMP3->sampleRate; ma_get_standard_channel_map(ma_standard_channel_map_default, pDecoder->internalChannels, pDecoder->internalChannelMap); - result = ma_decoder__init_dsp(pDecoder, pConfig, ma_decoder_internal_on_read_pcm_frames__mp3); - if (result != MA_SUCCESS) { - drmp3_uninit(pMP3); - ma_free(pMP3); - return result; - } - return MA_SUCCESS; } -#endif +#endif /* dr_mp3_h */ /* Raw */ -ma_uint32 ma_decoder_internal_on_read_pcm_frames__raw(ma_pcm_converter* pDSP, void* pFramesOut, ma_uint32 frameCount, void* pUserData) +static ma_uint64 ma_decoder_internal_on_read_pcm_frames__raw(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { - ma_decoder* pDecoder; ma_uint32 bpf; + ma_uint64 totalFramesRead; + void* pRunningFramesOut; - (void)pDSP; - pDecoder = (ma_decoder*)pUserData; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); + MA_ASSERT(pFramesOut != NULL); /* For raw decoding we just read directly from the decoder's callbacks. */ bpf = ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels); - return (ma_uint32)ma_decoder_read_bytes(pDecoder, pFramesOut, frameCount * bpf) / bpf; + + totalFramesRead = 0; + pRunningFramesOut = pFramesOut; + + while (totalFramesRead < frameCount) { + ma_uint64 framesReadThisIteration; + ma_uint64 framesToReadThisIteration = (frameCount - totalFramesRead); + if (framesToReadThisIteration > MA_SIZE_MAX) { + framesToReadThisIteration = MA_SIZE_MAX; + } + + framesReadThisIteration = ma_decoder_read_bytes(pDecoder, pRunningFramesOut, (size_t)framesToReadThisIteration * bpf) / bpf; /* Safe cast to size_t. */ + + totalFramesRead += framesReadThisIteration; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIteration * bpf); + + if (framesReadThisIteration < framesToReadThisIteration) { + break; /* Done. 
*/ + } + } + + return totalFramesRead; } -ma_result ma_decoder_internal_on_seek_to_pcm_frame__raw(ma_decoder* pDecoder, ma_uint64 frameIndex) +static ma_result ma_decoder_internal_on_seek_to_pcm_frame__raw(ma_decoder* pDecoder, ma_uint64 frameIndex) { ma_bool32 result = MA_FALSE; ma_uint64 totalBytesToSeek; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); if (pDecoder->onSeek == NULL) { return MA_ERROR; @@ -34332,53 +35287,61 @@ ma_result ma_decoder_internal_on_seek_to_pcm_frame__raw(ma_decoder* pDecoder, ma } } -ma_result ma_decoder_internal_on_uninit__raw(ma_decoder* pDecoder) +static ma_result ma_decoder_internal_on_uninit__raw(ma_decoder* pDecoder) { (void)pDecoder; return MA_SUCCESS; } -ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__raw(ma_decoder* pDecoder) +static ma_uint64 ma_decoder_internal_on_get_length_in_pcm_frames__raw(ma_decoder* pDecoder) { (void)pDecoder; return 0; } -ma_result ma_decoder_init_raw__internal(const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) +static ma_result ma_decoder_init_raw__internal(const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) { - ma_result result; - - ma_assert(pConfigIn != NULL); - ma_assert(pConfigOut != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pConfigIn != NULL); + MA_ASSERT(pConfigOut != NULL); + MA_ASSERT(pDecoder != NULL); - pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__raw; - pDecoder->onUninit = ma_decoder_internal_on_uninit__raw; + pDecoder->onReadPCMFrames = ma_decoder_internal_on_read_pcm_frames__raw; + pDecoder->onSeekToPCMFrame = ma_decoder_internal_on_seek_to_pcm_frame__raw; + pDecoder->onUninit = ma_decoder_internal_on_uninit__raw; pDecoder->onGetLengthInPCMFrames = ma_decoder_internal_on_get_length_in_pcm_frames__raw; /* Internal format. 
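The raw decoder is the simplest illustration of the split between the internal format (taken from pConfigIn) and the output format (pConfigOut), with the data converter set up by ma_decoder__postinit() doing the rest. A sketch using the in-memory initializer; the s16/mono/22050 input layout is an arbitrary example:

    static ma_uint64 read_raw_as_f32_stereo(const void* pRawData, size_t rawDataSize, float* pFramesOut, ma_uint64 frameCount)
    {
        ma_decoder_config configIn  = ma_decoder_config_init(ma_format_s16, 1, 22050);  /* How the raw bytes are laid out. */
        ma_decoder_config configOut = ma_decoder_config_init(ma_format_f32, 2, 44100);  /* What we want to read back.      */
        ma_decoder decoder;
        ma_uint64 framesRead = 0;

        if (ma_decoder_init_memory_raw(pRawData, rawDataSize, &configIn, &configOut, &decoder) == MA_SUCCESS) {
            framesRead = ma_decoder_read_pcm_frames(&decoder, pFramesOut, frameCount);
            ma_decoder_uninit(&decoder);
        }

        return framesRead;
    }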
*/ - pDecoder->internalFormat = pConfigIn->format; - pDecoder->internalChannels = pConfigIn->channels; + pDecoder->internalFormat = pConfigIn->format; + pDecoder->internalChannels = pConfigIn->channels; pDecoder->internalSampleRate = pConfigIn->sampleRate; ma_channel_map_copy(pDecoder->internalChannelMap, pConfigIn->channelMap, pConfigIn->channels); - result = ma_decoder__init_dsp(pDecoder, pConfigOut, ma_decoder_internal_on_read_pcm_frames__raw); - if (result != MA_SUCCESS) { - return result; - } - return MA_SUCCESS; } -ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder__init_allocation_callbacks(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + MA_ASSERT(pDecoder != NULL); + + if (pConfig != NULL) { + return ma_allocation_callbacks_init_copy(&pDecoder->allocationCallbacks, &pConfig->allocationCallbacks); + } else { + pDecoder->allocationCallbacks = ma_allocation_callbacks_init_default(); + return MA_SUCCESS; + } +} + +static ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) { - ma_assert(pConfig != NULL); + ma_result result; + + MA_ASSERT(pConfig != NULL); if (pDecoder == NULL) { return MA_INVALID_ARGS; } - ma_zero_object(pDecoder); + MA_ZERO_OBJECT(pDecoder); if (onRead == NULL || onSeek == NULL) { return MA_INVALID_ARGS; @@ -34388,10 +35351,26 @@ ma_result ma_decoder__preinit(ma_decoder_read_proc onRead, ma_decoder_seek_proc pDecoder->onSeek = onSeek; pDecoder->pUserData = pUserData; - (void)pConfig; + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + return MA_SUCCESS; } +static ma_result ma_decoder__postinit(const ma_decoder_config* pConfig, ma_decoder* pDecoder) +{ + ma_result result; + + result = ma_decoder__init_data_converter(pDecoder, pConfig); + if (result != MA_SUCCESS) { + return result; + } + + return result; +} + ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) { ma_decoder_config config; @@ -34405,10 +35384,15 @@ ma_result ma_decoder_init_wav(ma_decoder_read_proc onRead, ma_decoder_seek_proc } #ifdef MA_HAS_WAV - return ma_decoder_init_wav__internal(&config, pDecoder); + result = ma_decoder_init_wav__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34424,10 +35408,15 @@ ma_result ma_decoder_init_flac(ma_decoder_read_proc onRead, ma_decoder_seek_proc } #ifdef MA_HAS_FLAC - return ma_decoder_init_flac__internal(&config, pDecoder); + result = ma_decoder_init_flac__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34443,10 +35432,15 @@ ma_result ma_decoder_init_vorbis(ma_decoder_read_proc onRead, ma_decoder_seek_pr } #ifdef 
MA_HAS_VORBIS - return ma_decoder_init_vorbis__internal(&config, pDecoder); + result = ma_decoder_init_vorbis__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34462,10 +35456,15 @@ ma_result ma_decoder_init_mp3(ma_decoder_read_proc onRead, ma_decoder_seek_proc } #ifdef MA_HAS_MP3 - return ma_decoder_init_mp3__internal(&config, pDecoder); + result = ma_decoder_init_mp3__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) @@ -34480,15 +35479,20 @@ ma_result ma_decoder_init_raw(ma_decoder_read_proc onRead, ma_decoder_seek_proc return result; } - return ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder); + result = ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } -ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) { ma_result result = MA_NO_BACKEND; - ma_assert(pConfig != NULL); - ma_assert(pDecoder != NULL); + MA_ASSERT(pConfig != NULL); + MA_ASSERT(pDecoder != NULL); /* Silence some warnings in the case that we don't have any decoder backends enabled. 
*/ (void)onRead; @@ -34536,7 +35540,7 @@ ma_result ma_decoder_init__internal(ma_decoder_read_proc onRead, ma_decoder_seek return result; } - return result; + return ma_decoder__postinit(pConfig, pDecoder); } ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSeek, void* pUserData, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34555,11 +35559,11 @@ ma_result ma_decoder_init(ma_decoder_read_proc onRead, ma_decoder_seek_proc onSe } -size_t ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) +static size_t ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) { size_t bytesRemaining; - ma_assert(pDecoder->memory.dataSize >= pDecoder->memory.currentReadPos); + MA_ASSERT(pDecoder->memory.dataSize >= pDecoder->memory.currentReadPos); bytesRemaining = pDecoder->memory.dataSize - pDecoder->memory.currentReadPos; if (bytesToRead > bytesRemaining) { @@ -34567,14 +35571,14 @@ size_t ma_decoder__on_read_memory(ma_decoder* pDecoder, void* pBufferOut, size_t } if (bytesToRead > 0) { - ma_copy_memory(pBufferOut, pDecoder->memory.pData + pDecoder->memory.currentReadPos, bytesToRead); + MA_COPY_MEMORY(pBufferOut, pDecoder->memory.pData + pDecoder->memory.currentReadPos, bytesToRead); pDecoder->memory.currentReadPos += bytesToRead; } return bytesToRead; } -ma_bool32 ma_decoder__on_seek_memory(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) +static ma_bool32 ma_decoder__on_seek_memory(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) { if (origin == ma_seek_origin_current) { if (byteOffset > 0) { @@ -34600,7 +35604,7 @@ ma_bool32 ma_decoder__on_seek_memory(ma_decoder* pDecoder, int byteOffset, ma_se return MA_TRUE; } -ma_result ma_decoder__preinit_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder__preinit_memory(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) { ma_result result = ma_decoder__preinit(ma_decoder__on_read_memory, ma_decoder__on_seek_memory, NULL, pConfig, pDecoder); if (result != MA_SUCCESS) { @@ -34647,10 +35651,15 @@ ma_result ma_decoder_init_memory_wav(const void* pData, size_t dataSize, const m } #ifdef MA_HAS_WAV - return ma_decoder_init_wav__internal(&config, pDecoder); + result = ma_decoder_init_wav__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34666,10 +35675,15 @@ ma_result ma_decoder_init_memory_flac(const void* pData, size_t dataSize, const } #ifdef MA_HAS_FLAC - return ma_decoder_init_flac__internal(&config, pDecoder); + result = ma_decoder_init_flac__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34685,10 +35699,15 @@ ma_result ma_decoder_init_memory_vorbis(const void* pData, size_t dataSize, cons } #ifdef MA_HAS_VORBIS - return ma_decoder_init_vorbis__internal(&config, pDecoder); + result = ma_decoder_init_vorbis__internal(&config, pDecoder); #else - return 
MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const ma_decoder_config* pConfig, ma_decoder* pDecoder) @@ -34704,10 +35723,15 @@ ma_result ma_decoder_init_memory_mp3(const void* pData, size_t dataSize, const m } #ifdef MA_HAS_MP3 - return ma_decoder_init_mp3__internal(&config, pDecoder); + result = ma_decoder_init_mp3__internal(&config, pDecoder); #else - return MA_NO_BACKEND; + result = MA_NO_BACKEND; #endif + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const ma_decoder_config* pConfigIn, const ma_decoder_config* pConfigOut, ma_decoder* pDecoder) @@ -34722,17 +35746,16 @@ ma_result ma_decoder_init_memory_raw(const void* pData, size_t dataSize, const m return result; } - return ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder); + result = ma_decoder_init_raw__internal(pConfigIn, &config, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + + return ma_decoder__postinit(&config, pDecoder); } #ifndef MA_NO_STDIO -#include -#if !defined(_MSC_VER) && !defined(__DMC__) -#include /* For strcasecmp(). */ -#include /* For wcsrtombs() */ -#endif - -const char* ma_path_file_name(const char* path) +static const char* ma_path_file_name(const char* path) { const char* fileName; @@ -34759,7 +35782,7 @@ const char* ma_path_file_name(const char* path) return fileName; } -const wchar_t* ma_path_file_name_w(const wchar_t* path) +static const wchar_t* ma_path_file_name_w(const wchar_t* path) { const wchar_t* fileName; @@ -34787,7 +35810,7 @@ const wchar_t* ma_path_file_name_w(const wchar_t* path) } -const char* ma_path_extension(const char* path) +static const char* ma_path_extension(const char* path) { const char* extension; const char* lastOccurance; @@ -34812,7 +35835,7 @@ const char* ma_path_extension(const char* path) return (lastOccurance != NULL) ? 
lastOccurance : extension; } -const wchar_t* ma_path_extension_w(const wchar_t* path) +static const wchar_t* ma_path_extension_w(const wchar_t* path) { const wchar_t* extension; const wchar_t* lastOccurance; @@ -34838,7 +35861,7 @@ const wchar_t* ma_path_extension_w(const wchar_t* path) } -ma_bool32 ma_path_extension_equal(const char* path, const char* extension) +static ma_bool32 ma_path_extension_equal(const char* path, const char* extension) { const char* ext1; const char* ext2; @@ -34857,7 +35880,7 @@ ma_bool32 ma_path_extension_equal(const char* path, const char* extension) #endif } -ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extension) +static ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extension) { const wchar_t* ext1; const wchar_t* ext2; @@ -34884,8 +35907,8 @@ ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extensio mbstate_t mbs1; mbstate_t mbs2; - ma_zero_object(&mbs1); - ma_zero_object(&mbs2); + MA_ZERO_OBJECT(&mbs1); + MA_ZERO_OBJECT(&mbs2); if (wcsrtombs(ext1MB, &pext1, sizeof(ext1MB), &mbs1) == (size_t)-1) { return MA_FALSE; @@ -34900,30 +35923,36 @@ ma_bool32 ma_path_extension_equal_w(const wchar_t* path, const wchar_t* extensio } -size_t ma_decoder__on_read_stdio(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) +static size_t ma_decoder__on_read_stdio(ma_decoder* pDecoder, void* pBufferOut, size_t bytesToRead) { return fread(pBufferOut, 1, bytesToRead, (FILE*)pDecoder->pUserData); } -ma_bool32 ma_decoder__on_seek_stdio(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) +static ma_bool32 ma_decoder__on_seek_stdio(ma_decoder* pDecoder, int byteOffset, ma_seek_origin origin) { return fseek((FILE*)pDecoder->pUserData, byteOffset, (origin == ma_seek_origin_current) ? SEEK_CUR : SEEK_SET) == 0; } -ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +static ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) { + ma_result result; FILE* pFile; if (pDecoder == NULL) { return MA_INVALID_ARGS; } - ma_zero_object(pDecoder); + MA_ZERO_OBJECT(pDecoder); if (pFilePath == NULL || pFilePath[0] == '\0') { return MA_INVALID_ARGS; } + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + #if defined(_MSC_VER) && _MSC_VER >= 1400 if (fopen_s(&pFile, pFilePath, "rb") != 0) { return MA_ERROR; @@ -34938,25 +35967,47 @@ ma_result ma_decoder__preinit_file(const char* pFilePath, const ma_decoder_confi /* We need to manually set the user data so the calls to ma_decoder__on_seek_stdio() succeed. */ pDecoder->pUserData = pFile; - (void)pConfig; return MA_SUCCESS; } -ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) +/* +_wfopen() isn't always available in all compilation environments. + + * Windows only. + * MSVC seems to support it universally as far back as VC6 from what I can tell (haven't checked further back). + * MinGW-64 (both 32- and 64-bit) seems to support it. + * MinGW wraps it in !defined(__STRICT_ANSI__). + +This can be reviewed as compatibility issues arise. The preference is to use _wfopen_s() and _wfopen() as opposed to the wcsrtombs() +fallback, so if you notice your compiler not detecting this properly I'm happy to look at adding support. 
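From the caller's side nothing changes: the wide-character initializers keep their signatures and simply go through whichever of the paths below is available. An illustrative call (the path is a placeholder and a NULL config means defaults):

    ma_decoder decoder;
    ma_result result = ma_decoder_init_file_mp3_w(L"C:\\music\\song.mp3", NULL, &decoder);

When MA_HAS_WFOPEN is defined this opens the file with _wfopen()/_wfopen_s(); otherwise it takes the wcsrtombs() + fopen() fallback further down.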
+*/ +#if defined(_WIN32) + #if defined(_MSC_VER) || defined(__MINGW64__) || !defined(__STRICT_ANSI__) + #define MA_HAS_WFOPEN + #endif +#endif + +static ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_config* pConfig, ma_decoder* pDecoder) { + ma_result result; FILE* pFile; if (pDecoder == NULL) { return MA_INVALID_ARGS; } - ma_zero_object(pDecoder); + MA_ZERO_OBJECT(pDecoder); if (pFilePath == NULL || pFilePath[0] == '\0') { return MA_INVALID_ARGS; } -#if defined(_WIN32) + result = ma_decoder__init_allocation_callbacks(pConfig, pDecoder); + if (result != MA_SUCCESS) { + return result; + } + +#if defined(MA_HAS_WFOPEN) /* Use _wfopen() on Windows. */ #if defined(_MSC_VER) && _MSC_VER >= 1400 if (_wfopen_s(&pFile, pFilePath, L"rb") != 0) { @@ -34981,24 +36032,24 @@ ma_result ma_decoder__preinit_file_w(const wchar_t* pFilePath, const ma_decoder_ char* pFilePathMB = NULL; /* Get the length first. */ - ma_zero_object(&mbs); + MA_ZERO_OBJECT(&mbs); lenMB = wcsrtombs(NULL, &pFilePathTemp, 0, &mbs); if (lenMB == (size_t)-1) { return MA_ERROR; } - pFilePathMB = (char*)MA_MALLOC(lenMB + 1); + pFilePathMB = (char*)ma__malloc_from_callbacks(lenMB + 1, &pDecoder->allocationCallbacks); if (pFilePathMB == NULL) { return MA_OUT_OF_MEMORY; } pFilePathTemp = pFilePath; - ma_zero_object(&mbs); + MA_ZERO_OBJECT(&mbs); wcsrtombs(pFilePathMB, &pFilePathTemp, lenMB + 1, &mbs); pFile = fopen(pFilePathMB, "rb"); - MA_FREE(pFilePathMB); + ma__free_from_callbacks(pFilePathMB, &pDecoder->allocationCallbacks); } if (pFile == NULL) { @@ -35175,7 +36226,7 @@ ma_result ma_decoder_init_file_mp3_w(const wchar_t* pFilePath, const ma_decoder_ return ma_decoder_init_mp3(ma_decoder__on_read_stdio, ma_decoder__on_seek_stdio, pDecoder->pUserData, pConfig, pDecoder); } -#endif +#endif /* MA_NO_STDIO */ ma_result ma_decoder_uninit(ma_decoder* pDecoder) { @@ -35194,6 +36245,8 @@ ma_result ma_decoder_uninit(ma_decoder* pDecoder) } #endif + ma_data_converter_uninit(&pDecoder->converter); + return MA_SUCCESS; } @@ -35212,11 +36265,73 @@ ma_uint64 ma_decoder_get_length_in_pcm_frames(ma_decoder* pDecoder) ma_uint64 ma_decoder_read_pcm_frames(ma_decoder* pDecoder, void* pFramesOut, ma_uint64 frameCount) { + ma_result result; + ma_uint64 totalFramesReadOut; + ma_uint64 totalFramesReadIn; + void* pRunningFramesOut; + if (pDecoder == NULL) { return 0; } - return ma_pcm_converter_read(&pDecoder->dsp, pFramesOut, frameCount); + if (pDecoder->onReadPCMFrames == NULL) { + return 0; + } + + /* Fast path. */ + if (pDecoder->converter.isPassthrough) { + return pDecoder->onReadPCMFrames(pDecoder, pFramesOut, frameCount); + } + + /* Getting here means we need to do data conversion. */ + totalFramesReadOut = 0; + totalFramesReadIn = 0; + pRunningFramesOut = pFramesOut; + + while (totalFramesReadOut < frameCount) { + ma_uint8 pIntermediaryBuffer[MA_DATA_CONVERTER_STACK_BUFFER_SIZE]; /* In internal format. 
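The capacity below works out to MA_DATA_CONVERTER_STACK_BUFFER_SIZE divided by the internal bytes per frame; for example, if the stack buffer were 4096 bytes and the internal format were stereo f32 (8 bytes per frame), each pass of this loop would decode at most 4096 / 8 = 512 frames, with larger requests simply taking more iterations.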
*/ + ma_uint64 intermediaryBufferCap = sizeof(pIntermediaryBuffer) / ma_get_bytes_per_frame(pDecoder->internalFormat, pDecoder->internalChannels); + ma_uint64 framesToReadThisIterationIn; + ma_uint64 framesReadThisIterationIn; + ma_uint64 framesToReadThisIterationOut; + ma_uint64 framesReadThisIterationOut; + ma_uint64 requiredInputFrameCount; + + framesToReadThisIterationOut = (frameCount - totalFramesReadOut); + framesToReadThisIterationIn = framesToReadThisIterationOut; + if (framesToReadThisIterationIn > intermediaryBufferCap) { + framesToReadThisIterationIn = intermediaryBufferCap; + } + + requiredInputFrameCount = ma_data_converter_get_required_input_frame_count(&pDecoder->converter, framesToReadThisIterationOut); + if (framesToReadThisIterationIn > requiredInputFrameCount) { + framesToReadThisIterationIn = requiredInputFrameCount; + } + + if (requiredInputFrameCount > 0) { + framesReadThisIterationIn = pDecoder->onReadPCMFrames(pDecoder, pIntermediaryBuffer, framesToReadThisIterationIn); + totalFramesReadIn += framesReadThisIterationIn; + } + + /* + At this point we have our decoded data in input format and now we need to convert to output format. Note that even if we didn't read any + input frames, we still want to try processing frames because there may be some output frames generated from cached input data. + */ + framesReadThisIterationOut = framesToReadThisIterationOut; + result = ma_data_converter_process_pcm_frames(&pDecoder->converter, pIntermediaryBuffer, &framesReadThisIterationIn, pRunningFramesOut, &framesReadThisIterationOut); + if (result != MA_SUCCESS) { + break; + } + + totalFramesReadOut += framesReadThisIterationOut; + pRunningFramesOut = ma_offset_ptr(pRunningFramesOut, framesReadThisIterationOut * ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels)); + + if (framesReadThisIterationIn == 0 && framesReadThisIterationOut == 0) { + break; /* We're done. */ + } + } + + return totalFramesReadOut; } ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameIndex) @@ -35234,14 +36349,14 @@ ma_result ma_decoder_seek_to_pcm_frame(ma_decoder* pDecoder, ma_uint64 frameInde } -ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) +static ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_config* pConfigOut, ma_uint64* pFrameCountOut, void** ppPCMFramesOut) { ma_uint64 totalFrameCount; ma_uint64 bpf; ma_uint64 dataCapInFrames; void* pPCMFramesOut; - ma_assert(pDecoder != NULL); + MA_ASSERT(pDecoder != NULL); totalFrameCount = 0; bpf = ma_get_bytes_per_frame(pDecoder->outputFormat, pDecoder->outputChannels); @@ -35256,20 +36371,21 @@ ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_co /* Make room if there's not enough. 
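The capacity starts at 4096 frames and doubles on each growth (4096, 8192, 16384, ...). The reallocation now goes through ma__realloc_from_callbacks() with both the old and the new byte sizes, presumably so that an allocator without a native realloc can fall back to allocate, copy and free while knowing how many bytes to copy.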
*/ if (totalFrameCount == dataCapInFrames) { void* pNewPCMFramesOut; + ma_uint64 oldDataCapInFrames = dataCapInFrames; ma_uint64 newDataCapInFrames = dataCapInFrames*2; if (newDataCapInFrames == 0) { newDataCapInFrames = 4096; } if ((newDataCapInFrames * bpf) > MA_SIZE_MAX) { - ma_free(pPCMFramesOut); + ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks); return MA_TOO_LARGE; } - pNewPCMFramesOut = (void*)ma_realloc(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf)); + pNewPCMFramesOut = (void*)ma__realloc_from_callbacks(pPCMFramesOut, (size_t)(newDataCapInFrames * bpf), (size_t)(oldDataCapInFrames * bpf), &pDecoder->allocationCallbacks); if (pNewPCMFramesOut == NULL) { - ma_free(pPCMFramesOut); + ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks); return MA_OUT_OF_MEMORY; } @@ -35278,7 +36394,7 @@ ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_co } frameCountToTryReading = dataCapInFrames - totalFrameCount; - ma_assert(frameCountToTryReading > 0); + MA_ASSERT(frameCountToTryReading > 0); framesJustRead = ma_decoder_read_pcm_frames(pDecoder, (ma_uint8*)pPCMFramesOut + (totalFrameCount * bpf), frameCountToTryReading); totalFrameCount += framesJustRead; @@ -35299,7 +36415,7 @@ ma_result ma_decoder__full_decode_and_uninit(ma_decoder* pDecoder, ma_decoder_co if (ppPCMFramesOut != NULL) { *ppPCMFramesOut = pPCMFramesOut; } else { - ma_free(pPCMFramesOut); + ma__free_from_callbacks(pPCMFramesOut, &pDecoder->allocationCallbacks); } if (pFrameCountOut != NULL) { @@ -35381,7 +36497,7 @@ ma_result ma_sine_wave_init(double amplitude, double periodsPerSecond, ma_uint32 if (pSineWave == NULL) { return MA_INVALID_ARGS; } - ma_zero_object(pSineWave); + MA_ZERO_OBJECT(pSineWave); if (amplitude == 0 || periodsPerSecond == 0) { return MA_INVALID_ARGS; @@ -35402,37 +36518,30 @@ ma_result ma_sine_wave_init(double amplitude, double periodsPerSecond, ma_uint32 return MA_SUCCESS; } -ma_uint64 ma_sine_wave_read_f32(ma_sine_wave* pSineWave, ma_uint64 count, float* pSamples) -{ - return ma_sine_wave_read_f32_ex(pSineWave, count, 1, ma_stream_layout_interleaved, &pSamples); -} - -ma_uint64 ma_sine_wave_read_f32_ex(ma_sine_wave* pSineWave, ma_uint64 frameCount, ma_uint32 channels, ma_stream_layout layout, float** ppFrames) +ma_uint64 ma_sine_wave_read_pcm_frames(ma_sine_wave* pSineWave, void* pFramesOut, ma_uint64 frameCount, ma_format format, ma_uint32 channels) { if (pSineWave == NULL) { return 0; } - if (ppFrames != NULL) { + if (pFramesOut != NULL) { ma_uint64 iFrame; for (iFrame = 0; iFrame < frameCount; iFrame += 1) { - ma_uint32 iChannel; + ma_uint64 iChannel; + float s; - float s = (float)(sin(pSineWave->time * pSineWave->periodsPerSecond) * pSineWave->amplitude); + s = (float)(sin(pSineWave->time * pSineWave->periodsPerSecond) * pSineWave->amplitude); pSineWave->time += pSineWave->delta; - if (layout == ma_stream_layout_interleaved) { - for (iChannel = 0; iChannel < channels; iChannel += 1) { - ppFrames[0][iFrame*channels + iChannel] = s; - } - } else { - for (iChannel = 0; iChannel < channels; iChannel += 1) { - ppFrames[iChannel][iFrame] = s; - } + for (iChannel = 0; iChannel < channels; iChannel += 1) { + ma_uint32 bpf = ma_get_bytes_per_frame(format, channels); + ma_uint32 bps = ma_get_bytes_per_sample(format); + + ma_pcm_convert(ma_offset_ptr(pFramesOut, iFrame*bpf + iChannel*bps), format, &s, ma_format_f32, 1, ma_dither_mode_none); } } } else { - pSineWave->time += pSineWave->delta * (ma_int64)frameCount; /* Cast to int64 required 
for VC6. */ + pSineWave->time += pSineWave->delta * (ma_int64)frameCount; /* Cast to int64 required for VC6. Won't affect anything in practice. */ } return frameCount; @@ -35444,37 +36553,26 @@ ma_uint64 ma_sine_wave_read_f32_ex(ma_sine_wave* pSineWave, ma_uint64 frameCount #endif /* MINIAUDIO_IMPLEMENTATION */ -/* -BACKEND IMPLEMENTATION GUIDELINES -================================= -Context -------- -- Run-time linking if possible. -- Set whether or not it's an asynchronous backend - -Device ------- -- If a full-duplex device is requested and the backend does not support full duplex devices, have ma_device_init__[backend]() - return MA_DEVICE_TYPE_NOT_SUPPORTED. -- If exclusive mode is requested, but the backend does not support it, return MA_SHARE_MODE_NOT_SUPPORTED. If practical, try - not to fall back to a different share mode - give the client exactly what they asked for. Some backends, such as ALSA, may - not have a practical way to distinguish between the two. -- If pDevice->usingDefault* is set, prefer the device's native value if the backend supports it. Otherwise use the relevant - value from the config. -- If the configs buffer size in frames is 0, set it based on the buffer size in milliseconds, keeping in mind to handle the - case when the default sample rate is being used where practical. -- Backends must set the following members of pDevice before returning successfully from ma_device_init__[backend](): - - internalFormat - - internalChannels - - internalSampleRate - - internalChannelMap - - bufferSizeInFrames - - periods -*/ - /* REVISION HISTORY ================ +v0.xx.xx - 2020-xx-xx + - Fix potential crash when ma_device object's are not aligned to MA_SIMD_ALIGNMENT. + +v0.9.10 - 2020-01-15 + - Fix compilation errors due to #if/#endif mismatches. + - WASAPI: Fix a bug where automatic stream routing is being performed for devices that are initialized with an explicit device ID. + - iOS: Fix a crash on device uninitialization. + +v0.9.9 - 2020-01-09 + - Fix compilation errors with MinGW. + - Fix compilation errors when compiling on Apple platforms. + - WASAPI: Add support for disabling hardware offloading. + - WASAPI: Add support for disabling automatic stream routing. + - Core Audio: Fix bugs in the case where the internal device uses deinterleaved buffers. + - Core Audio: Add support for controlling the session category (AVAudioSessionCategory) and options (AVAudioSessionCategoryOptions). + - JACK: Fix bug where incorrect ports are connected. + v0.9.8 - 2019-10-07 - WASAPI: Fix a potential deadlock when starting a full-duplex device. - WASAPI: Enable automatic resampling by default. Disable with config.wasapi.noAutoConvertSRC. 
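As a usage note for the ma_sine_wave rework earlier in this file: the generator now writes interleaved frames directly in whatever format and channel count the caller asks for, converting each sample with ma_pcm_convert(). A hypothetical caller, with all parameter values arbitrary and the argument meanings taken from the signature above:

    ma_sine_wave sineWave;
    float frames[1024 * 2];  /* 1024 interleaved stereo f32 frames. */

    ma_sine_wave_init(0.2, 220.0, 48000, &sineWave);  /* amplitude, periods per second (Hz), sample rate */
    ma_sine_wave_read_pcm_frames(&sineWave, frames, 1024, ma_format_f32, 2);

Passing NULL for the output buffer just advances the generator's time, which is what the else branch above relies on.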
@@ -35864,7 +36962,7 @@ For more information, please refer to =============================================================================== ALTERNATIVE 2 - MIT No Attribution =============================================================================== -Copyright 2019 David Reid +Copyright 2020 David Reid Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/src/raudio.c b/src/raudio.c index 5e55a36b9..d9a5331d7 100644 --- a/src/raudio.c +++ b/src/raudio.c @@ -78,6 +78,85 @@ #include "utils.h" // Required for: fopen() Android mapping #endif + + +#if defined(_WIN32) +// @raysan5: To avoid conflicting windows.h symbols with raylib, so flags are defined +// WARNING: Those flags avoid inclusion of some Win32 headers that could be required +// by user at some point and won't be included... +//------------------------------------------------------------------------------------- + +// If defined, the following flags inhibit definition of the indicated items. +#define NOGDICAPMASKS // CC_*, LC_*, PC_*, CP_*, TC_*, RC_ +#define NOVIRTUALKEYCODES // VK_* +#define NOWINMESSAGES // WM_*, EM_*, LB_*, CB_* +#define NOWINSTYLES // WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_* +#define NOSYSMETRICS // SM_* +#define NOMENUS // MF_* +#define NOICONS // IDI_* +#define NOKEYSTATES // MK_* +#define NOSYSCOMMANDS // SC_* +#define NORASTEROPS // Binary and Tertiary raster ops +#define NOSHOWWINDOW // SW_* +#define OEMRESOURCE // OEM Resource values +#define NOATOM // Atom Manager routines +#define NOCLIPBOARD // Clipboard routines +#define NOCOLOR // Screen colors +#define NOCTLMGR // Control and Dialog routines +#define NODRAWTEXT // DrawText() and DT_* +#define NOGDI // All GDI defines and routines +#define NOKERNEL // All KERNEL defines and routines +#define NOUSER // All USER defines and routines +//#define NONLS // All NLS defines and routines +#define NOMB // MB_* and MessageBox() +#define NOMEMMGR // GMEM_*, LMEM_*, GHND, LHND, associated routines +#define NOMETAFILE // typedef METAFILEPICT +#define NOMINMAX // Macros min(a,b) and max(a,b) +#define NOMSG // typedef MSG and associated routines +#define NOOPENFILE // OpenFile(), OemToAnsi, AnsiToOem, and OF_* +#define NOSCROLL // SB_* and scrolling routines +#define NOSERVICE // All Service Controller routines, SERVICE_ equates, etc. +#define NOSOUND // Sound driver routines +#define NOTEXTMETRIC // typedef TEXTMETRIC and associated routines +#define NOWH // SetWindowsHook and WH_* +#define NOWINOFFSETS // GWL_*, GCL_*, associated routines +#define NOCOMM // COMM driver routines +#define NOKANJI // Kanji support stuff. +#define NOHELP // Help engine interface. +#define NOPROFILER // Profiler interface. +#define NODEFERWINDOWPOS // DeferWindowPos routines +#define NOMCX // Modem Configuration Extensions + +// Type required before windows.h inclusion +typedef struct tagMSG *LPMSG; + +#include + +// Type required by some unused function... 
+typedef struct tagBITMAPINFOHEADER { + DWORD biSize; + LONG biWidth; + LONG biHeight; + WORD biPlanes; + WORD biBitCount; + DWORD biCompression; + DWORD biSizeImage; + LONG biXPelsPerMeter; + LONG biYPelsPerMeter; + DWORD biClrUsed; + DWORD biClrImportant; +} BITMAPINFOHEADER, *PBITMAPINFOHEADER; + +#include +#include +#include + +// @raysan5: Some required types defined for MSVC/TinyC compiler +#if defined(_MSC_VER) || defined(__TINYC__) + #include "propidl.h" +#endif +#endif + #define MA_NO_JACK #define MINIAUDIO_IMPLEMENTATION #include "external/miniaudio.h" // miniaudio library @@ -172,7 +251,7 @@ typedef enum { // Audio buffer structure struct rAudioBuffer { - ma_pcm_converter dsp; // PCM data converter + ma_data_converter converter; // Audio data converter float volume; // Audio buffer volume float pitch; // Audio buffer pitch @@ -185,7 +264,7 @@ struct rAudioBuffer { bool isSubBufferProcessed[2]; // SubBuffer processed (virtual double buffer) unsigned int sizeInFrames; // Total buffer size in frames unsigned int frameCursorPos; // Frame cursor position - unsigned int totalFramesProcessed; // Total frames processed in this buffer (required for play timming) + unsigned int totalFramesProcessed; // Total frames processed in this buffer (required for play timing) unsigned char *data; // Data buffer, on music stream keeps filling @@ -202,7 +281,6 @@ typedef struct AudioData { ma_device device; // miniaudio device ma_mutex lock; // miniaudio mutex lock bool isReady; // Check if audio device is ready - float masterVolume; // Master volume (multiplied on output mixing) } System; struct { AudioBuffer *first; // Pointer to first AudioBuffer in the list @@ -225,7 +303,6 @@ static AudioData AUDIO = { 0 }; // Global CORE context //---------------------------------------------------------------------------------- static void OnLog(ma_context *pContext, ma_device *pDevice, ma_uint32 logLevel, const char *message); static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const void *pFramesInput, ma_uint32 frameCount); -static ma_uint32 OnAudioBufferDSPRead(ma_pcm_converter *pDSP, void *pFramesOut, ma_uint32 frameCount, void *pUserData); static void MixAudioFrames(float *framesOut, const float *framesIn, ma_uint32 frameCount, float localVolume); static void InitAudioBufferPool(void); // Initialise the multichannel buffer pool @@ -273,7 +350,6 @@ void UntrackAudioBuffer(AudioBuffer *buffer); void InitAudioDevice(void) { // TODO: Load AUDIO context memory dynamically? 
- AUDIO.System.masterVolume = 1.0f; // Init audio context ma_context_config ctxConfig = ma_context_config_init(); @@ -366,10 +442,7 @@ bool IsAudioDeviceReady(void) // Set master volume (listener) void SetMasterVolume(float volume) { - if (volume < 0.0f) volume = 0.0f; - else if (volume > 1.0f) volume = 1.0f; - - AUDIO.System.masterVolume = volume; + ma_device_set_master_volume(&AUDIO.System.device, volume); } //---------------------------------------------------------------------------------- @@ -390,19 +463,10 @@ AudioBuffer *InitAudioBuffer(ma_format format, ma_uint32 channels, ma_uint32 sam audioBuffer->data = RL_CALLOC(sizeInFrames*channels*ma_get_bytes_per_sample(format), 1); // Audio data runs through a format converter - ma_pcm_converter_config dspConfig; - memset(&dspConfig, 0, sizeof(dspConfig)); - dspConfig.formatIn = format; - dspConfig.formatOut = DEVICE_FORMAT; - dspConfig.channelsIn = channels; - dspConfig.channelsOut = DEVICE_CHANNELS; - dspConfig.sampleRateIn = sampleRate; - dspConfig.sampleRateOut = DEVICE_SAMPLE_RATE; - dspConfig.onRead = OnAudioBufferDSPRead; // Callback on data reading - dspConfig.pUserData = audioBuffer; // Audio data pointer - dspConfig.allowDynamicSampleRate = true; // Required for pitch shifting - - ma_result result = ma_pcm_converter_init(&dspConfig, &audioBuffer->dsp); + ma_data_converter_config converterConfig = ma_data_converter_config_init(format, DEVICE_FORMAT, channels, DEVICE_CHANNELS, sampleRate, DEVICE_SAMPLE_RATE); + converterConfig.resampling.allowDynamicSampleRate = true; // Required for pitch shifting + + ma_result result = ma_data_converter_init(&converterConfig, &audioBuffer->converter); if (result != MA_SUCCESS) { @@ -437,6 +501,7 @@ void CloseAudioBuffer(AudioBuffer *buffer) { if (buffer != NULL) { + ma_data_converter_uninit(&buffer->converter); UntrackAudioBuffer(buffer); RL_FREE(buffer->data); RL_FREE(buffer); @@ -519,10 +584,10 @@ void SetAudioBufferPitch(AudioBuffer *buffer, float pitch) // Note that this changes the duration of the sound: // - higher pitches will make the sound faster // - lower pitches make it slower - ma_uint32 newOutputSampleRate = (ma_uint32)((float)buffer->dsp.src.config.sampleRateOut/pitchMul); - buffer->pitch *= (float)buffer->dsp.src.config.sampleRateOut/newOutputSampleRate; + ma_uint32 newOutputSampleRate = (ma_uint32)((float)buffer->converter.config.sampleRateOut/pitchMul); + buffer->pitch *= (float)buffer->converter.config.sampleRateOut/newOutputSampleRate; - ma_pcm_converter_set_output_sample_rate(&buffer->dsp, newOutputSampleRate); + ma_data_converter_set_rate(&buffer->converter, buffer->converter.config.sampleRateIn, newOutputSampleRate); } else TRACELOG(LOG_WARNING, "SetAudioBufferPitch() : No audio buffer"); } @@ -621,13 +686,13 @@ Sound LoadSoundFromWave(Wave wave) ma_format formatIn = ((wave.sampleSize == 8)? ma_format_u8 : ((wave.sampleSize == 16)? 
ma_format_s16 : ma_format_f32)); ma_uint32 frameCountIn = wave.sampleCount/wave.channels; - ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, NULL, formatIn, wave.channels, wave.sampleRate, frameCountIn); + ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, 0, DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, NULL, frameCountIn, formatIn, wave.channels, wave.sampleRate); if (frameCount == 0) TRACELOG(LOG_WARNING, "LoadSoundFromWave() : Failed to get frame count for format conversion"); AudioBuffer *audioBuffer = InitAudioBuffer(DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, frameCount, AUDIO_BUFFER_USAGE_STATIC); if (audioBuffer == NULL) TRACELOG(LOG_WARNING, "LoadSoundFromWave() : Failed to create audio buffer"); - frameCount = (ma_uint32)ma_convert_frames(audioBuffer->data, audioBuffer->dsp.formatConverterIn.config.formatIn, audioBuffer->dsp.formatConverterIn.config.channels, audioBuffer->dsp.src.config.sampleRateIn, wave.data, formatIn, wave.channels, wave.sampleRate, frameCountIn); + frameCount = (ma_uint32)ma_convert_frames(audioBuffer->data, frameCount, DEVICE_FORMAT, DEVICE_CHANNELS, DEVICE_SAMPLE_RATE, wave.data, frameCountIn, formatIn, wave.channels, wave.sampleRate); if (frameCount == 0) TRACELOG(LOG_WARNING, "LoadSoundFromWave() : Format conversion failed"); sound.sampleCount = frameCount*DEVICE_CHANNELS; @@ -666,7 +731,7 @@ void UpdateSound(Sound sound, const void *data, int samplesCount) StopAudioBuffer(audioBuffer); // TODO: May want to lock/unlock this since this data buffer is read at mixing time - memcpy(audioBuffer->data, data, samplesCount*audioBuffer->dsp.formatConverterIn.config.channels*ma_get_bytes_per_sample(audioBuffer->dsp.formatConverterIn.config.formatIn)); + memcpy(audioBuffer->data, data, samplesCount*ma_get_bytes_per_frame(audioBuffer->converter.config.formatIn, audioBuffer->converter.config.channelsIn)); } else TRACELOG(LOG_ERROR, "UpdateSound() : Invalid sound - no audio buffer"); } @@ -869,7 +934,7 @@ void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels) ma_uint32 frameCountIn = wave->sampleCount; // Is wave->sampleCount actually the frame count? That terminology needs to change, if so. 
- ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, formatOut, channels, sampleRate, NULL, formatIn, wave->channels, wave->sampleRate, frameCountIn); + ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, 0, formatOut, channels, sampleRate, NULL, frameCountIn, formatIn, wave->channels, wave->sampleRate); if (frameCount == 0) { TRACELOG(LOG_ERROR, "WaveFormat() : Failed to get frame count for format conversion."); @@ -878,7 +943,7 @@ void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels) void *data = RL_MALLOC(frameCount*channels*(sampleSize/8)); - frameCount = (ma_uint32)ma_convert_frames(data, formatOut, channels, sampleRate, wave->data, formatIn, wave->channels, wave->sampleRate, frameCountIn); + frameCount = (ma_uint32)ma_convert_frames(data, frameCount, formatOut, channels, sampleRate, wave->data, frameCountIn, formatIn, wave->channels, wave->sampleRate); if (frameCount == 0) { TRACELOG(LOG_ERROR, "WaveFormat() : Format conversion failed."); @@ -1012,7 +1077,7 @@ Music LoadMusicStream(const char *fileName) music.ctxType = MUSIC_AUDIO_MP3; music.stream = InitAudioStream(ctxMp3->sampleRate, 32, ctxMp3->channels); - music.sampleCount = drmp3_get_pcm_frame_count(ctxMp3)*ctxMp3->channels; + music.sampleCount = (unsigned int)drmp3_get_pcm_frame_count(ctxMp3)*ctxMp3->channels; music.loopCount = 0; // Infinite loop by default musicLoaded = true; } @@ -1487,6 +1552,142 @@ static void OnLog(ma_context *pContext, ma_device *pDevice, ma_uint32 logLevel, TRACELOG(LOG_ERROR, message); // All log messages from miniaudio are errors } +// Reads audio data from an AudioBuffer object in internal format. +static ma_uint32 ReadAudioBufferFramesInInternalFormat(AudioBuffer *audioBuffer, void *framesOut, ma_uint32 frameCount) +{ + ma_uint32 subBufferSizeInFrames = (audioBuffer->sizeInFrames > 1)? audioBuffer->sizeInFrames/2 : audioBuffer->sizeInFrames; + ma_uint32 currentSubBufferIndex = audioBuffer->frameCursorPos/subBufferSizeInFrames; + + if (currentSubBufferIndex > 1) + { + TRACELOGD("Frame cursor position moved too far forward in audio stream"); + return 0; + } + + // Another thread can update the processed state of buffers so + // we just take a copy here to try and avoid potential synchronization problems + bool isSubBufferProcessed[2]; + isSubBufferProcessed[0] = audioBuffer->isSubBufferProcessed[0]; + isSubBufferProcessed[1] = audioBuffer->isSubBufferProcessed[1]; + + ma_uint32 frameSizeInBytes = ma_get_bytes_per_frame(audioBuffer->converter.config.formatIn, audioBuffer->converter.config.channelsIn); + + // Fill out every frame until we find a buffer that's marked as processed. 
Then fill the remainder with 0
+    ma_uint32 framesRead = 0;
+    while (1)
+    {
+        // We break from this loop differently depending on the buffer's usage
+        //  - For static buffers, we simply fill as much data as we can
+        //  - For streaming buffers we only fill the halves of the buffer that are processed
+        //    Unprocessed halves must keep their audio data intact
+        if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
+        {
+            if (framesRead >= frameCount) break;
+        }
+        else
+        {
+            if (isSubBufferProcessed[currentSubBufferIndex]) break;
+        }
+
+        ma_uint32 totalFramesRemaining = (frameCount - framesRead);
+        if (totalFramesRemaining == 0) break;
+
+        ma_uint32 framesRemainingInOutputBuffer;
+        if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC)
+        {
+            framesRemainingInOutputBuffer = audioBuffer->sizeInFrames - audioBuffer->frameCursorPos;
+        }
+        else
+        {
+            ma_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames*currentSubBufferIndex;
+            framesRemainingInOutputBuffer = subBufferSizeInFrames - (audioBuffer->frameCursorPos - firstFrameIndexOfThisSubBuffer);
+        }
+
+        ma_uint32 framesToRead = totalFramesRemaining;
+        if (framesToRead > framesRemainingInOutputBuffer) framesToRead = framesRemainingInOutputBuffer;
+
+        memcpy((unsigned char *)framesOut + (framesRead*frameSizeInBytes), audioBuffer->data + (audioBuffer->frameCursorPos*frameSizeInBytes), framesToRead*frameSizeInBytes);
+        audioBuffer->frameCursorPos = (audioBuffer->frameCursorPos + framesToRead)%audioBuffer->sizeInFrames;
+        framesRead += framesToRead;
+
+        // If we've read to the end of the buffer, mark it as processed
+        if (framesToRead == framesRemainingInOutputBuffer)
+        {
+            audioBuffer->isSubBufferProcessed[currentSubBufferIndex] = true;
+            isSubBufferProcessed[currentSubBufferIndex] = true;
+
+            currentSubBufferIndex = (currentSubBufferIndex + 1)%2;
+
+            // We need to break from this loop if we're not looping
+            if (!audioBuffer->looping)
+            {
+                StopAudioBuffer(audioBuffer);
+                break;
+            }
+        }
+    }
+
+    // Zero-fill excess
+    ma_uint32 totalFramesRemaining = (frameCount - framesRead);
+    if (totalFramesRemaining > 0)
+    {
+        memset((unsigned char *)framesOut + (framesRead*frameSizeInBytes), 0, totalFramesRemaining*frameSizeInBytes);
+
+        // For static buffers we can fill the remaining frames with silence for safety, but we don't want
+        // to report those frames as "read". The reason for this is that the caller uses the return value
+        // to know whether or not a non-looping sound has finished playback.
+        if (audioBuffer->usage != AUDIO_BUFFER_USAGE_STATIC) framesRead += totalFramesRemaining;
+    }
+
+    return framesRead;
+}
+
+// Reads audio data from an AudioBuffer object in device format. Returned data will be in a format appropriate for mixing.
+static ma_uint32 ReadAudioBufferFramesInMixingFormat(AudioBuffer *audioBuffer, float *framesOut, ma_uint32 frameCount)
+{
+    // What's going on here is that we're continuously converting data from the AudioBuffer's internal format to the mixing format, which
+    // should be defined by the output format of the data converter. We do this until frameCount frames have been output. The important
+    // detail to remember here is that we never, ever attempt to read more input data than is required for the specified number of output
+    // frames. This can be achieved with ma_data_converter_get_required_input_frame_count().
+ ma_uint8 inputBuffer[4096]; + ma_uint32 inputBufferFrameCap = sizeof(inputBuffer) / ma_get_bytes_per_frame(audioBuffer->converter.config.formatIn, audioBuffer->converter.config.channelsIn); + + ma_uint32 totalOutputFramesProcessed = 0; + while (totalOutputFramesProcessed < frameCount) + { + ma_uint64 outputFramesToProcessThisIteration = frameCount - totalOutputFramesProcessed; + + ma_uint64 inputFramesToProcessThisIteration = ma_data_converter_get_required_input_frame_count(&audioBuffer->converter, outputFramesToProcessThisIteration); + if (inputFramesToProcessThisIteration > inputBufferFrameCap) + { + inputFramesToProcessThisIteration = inputBufferFrameCap; + } + + float *runningFramesOut = framesOut + (totalOutputFramesProcessed * audioBuffer->converter.config.channelsOut); + + /* At this point we can convert the data to our mixing format. */ + ma_uint64 inputFramesProcessedThisIteration = ReadAudioBufferFramesInInternalFormat(audioBuffer, inputBuffer, (ma_uint32)inputFramesToProcessThisIteration); /* Safe cast. */ + ma_uint64 outputFramesProcessedThisIteration = outputFramesToProcessThisIteration; + ma_data_converter_process_pcm_frames(&audioBuffer->converter, inputBuffer, &inputFramesProcessedThisIteration, runningFramesOut, &outputFramesProcessedThisIteration); + + totalOutputFramesProcessed += (ma_uint32)outputFramesProcessedThisIteration; /* Safe cast. */ + + if (inputFramesProcessedThisIteration < inputFramesToProcessThisIteration) + { + break; /* Ran out of input data. */ + } + + /* This should never be hit, but will add it here for safety. Ensures we get out of the loop when no input nor output frames are processed. */ + if (inputFramesProcessedThisIteration == 0 && outputFramesProcessedThisIteration == 0) + { + break; + } + } + + return totalOutputFramesProcessed; +} + + // Sending audio data to device callback function // NOTE: All the mixing takes place here static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const void *pFramesInput, ma_uint32 frameCount) @@ -1530,7 +1731,7 @@ static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const framesToReadRightNow = sizeof(tempBuffer)/sizeof(tempBuffer[0])/DEVICE_CHANNELS; } - ma_uint32 framesJustRead = (ma_uint32)ma_pcm_converter_read(&audioBuffer->dsp, tempBuffer, framesToReadRightNow); + ma_uint32 framesJustRead = ReadAudioBufferFramesInMixingFormat(audioBuffer, tempBuffer, framesToReadRightNow); if (framesJustRead > 0) { float *framesOut = (float *)pFramesOut + (framesRead*AUDIO.System.device.playback.channels); @@ -1576,98 +1777,6 @@ static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const ma_mutex_unlock(&AUDIO.System.lock); } -// DSP read from audio buffer callback function -static ma_uint32 OnAudioBufferDSPRead(ma_pcm_converter *pDSP, void *pFramesOut, ma_uint32 frameCount, void *pUserData) -{ - AudioBuffer *audioBuffer = (AudioBuffer *)pUserData; - - ma_uint32 subBufferSizeInFrames = (audioBuffer->sizeInFrames > 1)? 
audioBuffer->sizeInFrames/2 : audioBuffer->sizeInFrames; - ma_uint32 currentSubBufferIndex = audioBuffer->frameCursorPos/subBufferSizeInFrames; - - if (currentSubBufferIndex > 1) - { - TRACELOGD("Frame cursor position moved too far forward in audio stream"); - return 0; - } - - // Another thread can update the processed state of buffers so - // we just take a copy here to try and avoid potential synchronization problems - bool isSubBufferProcessed[2]; - isSubBufferProcessed[0] = audioBuffer->isSubBufferProcessed[0]; - isSubBufferProcessed[1] = audioBuffer->isSubBufferProcessed[1]; - - ma_uint32 frameSizeInBytes = ma_get_bytes_per_sample(audioBuffer->dsp.formatConverterIn.config.formatIn)*audioBuffer->dsp.formatConverterIn.config.channels; - - // Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0 - ma_uint32 framesRead = 0; - while (1) - { - // We break from this loop differently depending on the buffer's usage - // - For static buffers, we simply fill as much data as we can - // - For streaming buffers we only fill the halves of the buffer that are processed - // Unprocessed halves must keep their audio data in-tact - if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC) - { - if (framesRead >= frameCount) break; - } - else - { - if (isSubBufferProcessed[currentSubBufferIndex]) break; - } - - ma_uint32 totalFramesRemaining = (frameCount - framesRead); - if (totalFramesRemaining == 0) break; - - ma_uint32 framesRemainingInOutputBuffer; - if (audioBuffer->usage == AUDIO_BUFFER_USAGE_STATIC) - { - framesRemainingInOutputBuffer = audioBuffer->sizeInFrames - audioBuffer->frameCursorPos; - } - else - { - ma_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames*currentSubBufferIndex; - framesRemainingInOutputBuffer = subBufferSizeInFrames - (audioBuffer->frameCursorPos - firstFrameIndexOfThisSubBuffer); - } - - ma_uint32 framesToRead = totalFramesRemaining; - if (framesToRead > framesRemainingInOutputBuffer) framesToRead = framesRemainingInOutputBuffer; - - memcpy((unsigned char *)pFramesOut + (framesRead*frameSizeInBytes), audioBuffer->data + (audioBuffer->frameCursorPos*frameSizeInBytes), framesToRead*frameSizeInBytes); - audioBuffer->frameCursorPos = (audioBuffer->frameCursorPos + framesToRead)%audioBuffer->sizeInFrames; - framesRead += framesToRead; - - // If we've read to the end of the buffer, mark it as processed - if (framesToRead == framesRemainingInOutputBuffer) - { - audioBuffer->isSubBufferProcessed[currentSubBufferIndex] = true; - isSubBufferProcessed[currentSubBufferIndex] = true; - - currentSubBufferIndex = (currentSubBufferIndex + 1)%2; - - // We need to break from this loop if we're not looping - if (!audioBuffer->looping) - { - StopAudioBuffer(audioBuffer); - break; - } - } - } - - // Zero-fill excess - ma_uint32 totalFramesRemaining = (frameCount - framesRead); - if (totalFramesRemaining > 0) - { - memset((unsigned char *)pFramesOut + (framesRead*frameSizeInBytes), 0, totalFramesRemaining*frameSizeInBytes); - - // For static buffers we can fill the remaining frames with silence for safety, but we don't want - // to report those frames as "read". The reason for this is that the caller uses the return value - // to know whether or not a non-looping sound has finished playback. - if (audioBuffer->usage != AUDIO_BUFFER_USAGE_STATIC) framesRead += totalFramesRemaining; - } - - return framesRead; -} - // This is the main mixing function. Mixing is pretty simple in this project - it's just an accumulation. 
// NOTE: framesOut is both an input and an output. It will be initially filled with zeros outside of this function. static void MixAudioFrames(float *framesOut, const float *framesIn, ma_uint32 frameCount, float localVolume) @@ -1679,7 +1788,7 @@ static void MixAudioFrames(float *framesOut, const float *framesIn, ma_uint32 fr float *frameOut = framesOut + (iFrame*AUDIO.System.device.playback.channels); const float *frameIn = framesIn + (iFrame*AUDIO.System.device.playback.channels); - frameOut[iChannel] += (frameIn[iChannel]*AUDIO.System.masterVolume*localVolume); + frameOut[iChannel] += (frameIn[iChannel]*localVolume); } } }
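
Usage note: the LoadSoundFromWave() and WaveFormat() changes in this patch rely on the reworked miniaudio 0.10 ma_convert_frames() signature, which now takes explicit input and output frame counts. The snippet below is an illustrative sketch only, not part of the patch: it shows the same two-pass pattern used at those call sites, querying the required output frame count with NULL buffers first, then allocating and converting. The formats, channel counts, sample rates and buffer sizes are example values, and the include path assumes compilation from raylib's src/ directory.

// Illustrative sketch (not part of the patch): miniaudio 0.10 ma_convert_frames() usage
#define MINIAUDIO_IMPLEMENTATION
#include "external/miniaudio.h"

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    // Example input: 1 second of mono s16 silence at 22050 Hz (placeholder data)
    ma_uint32 frameCountIn = 22050;
    ma_int16 *dataIn = (ma_int16 *)calloc(frameCountIn, sizeof(ma_int16));
    if (dataIn == NULL) return 1;

    // Pass 1: NULL buffers, returns the number of output frames the conversion will produce
    ma_uint64 frameCountOut = ma_convert_frames(NULL, 0, ma_format_f32, 2, 44100,
                                                NULL, frameCountIn, ma_format_s16, 1, 22050);
    if (frameCountOut == 0) { printf("Failed to get output frame count\n"); return 1; }

    float *dataOut = (float *)malloc((size_t)(frameCountOut*2*sizeof(float)));
    if (dataOut == NULL) return 1;

    // Pass 2: perform the actual format/channel/sample-rate conversion
    frameCountOut = ma_convert_frames(dataOut, frameCountOut, ma_format_f32, 2, 44100,
                                      dataIn, frameCountIn, ma_format_s16, 1, 22050);
    if (frameCountOut == 0) { printf("Format conversion failed\n"); return 1; }

    printf("Converted %u input frames to %u output frames\n",
           (unsigned int)frameCountIn, (unsigned int)frameCountOut);

    free(dataOut);
    free(dataIn);
    return 0;
}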