Added SDL_AUDIO_FRAMESIZE

main
Brick 2023-09-03 22:07:05 +01:00 committed by Sam Lantinga
parent 53122593f8
commit f2ca9a615b
11 changed files with 29 additions and 33 deletions

View File

@ -152,6 +152,9 @@ typedef struct SDL_AudioSpec
int freq; /**< sample rate: sample frames per second */
} SDL_AudioSpec;
+/* Calculate the size of each audio frame (in bytes) */
+#define SDL_AUDIO_FRAMESIZE(x) (SDL_AUDIO_BYTESIZE((x).format) * (x).channels)
/* SDL_AudioStream is an audio conversion interface.
- It can handle resampling data in chunks without generating
artifacts, when it doesn't have the complete buffer available.
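
A minimal usage sketch of the new macro (the helper name, the chosen spec values, and the SDL_Log output are illustrative assumptions, not part of this commit):

#include <SDL3/SDL.h>

/* Hypothetical helper: how many complete sample frames fit in a byte buffer. */
static int BytesToFrames(const SDL_AudioSpec *spec, int num_bytes)
{
    return num_bytes / SDL_AUDIO_FRAMESIZE(*spec);
}

static void FrameSizeExample(void)
{
    /* Assumed spec: stereo 32-bit float at 48 kHz -> 4 bytes * 2 channels = 8 bytes per frame. */
    const SDL_AudioSpec spec = { .format = SDL_AUDIO_F32, .channels = 2, .freq = 48000 };
    SDL_Log("frame size: %d bytes", SDL_AUDIO_FRAMESIZE(spec));     /* 8 */
    SDL_Log("4096 bytes = %d frames", BytesToFrames(&spec, 4096));  /* 512 */
}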

View File

@ -832,9 +832,9 @@ SDL_bool SDL_OutputAudioThreadIterate(SDL_AudioDevice *device)
void SDL_OutputAudioThreadShutdown(SDL_AudioDevice *device)
{
SDL_assert(!device->iscapture);
-const int samples = (device->buffer_size / SDL_AUDIO_BYTESIZE(device->spec.format)) / device->spec.channels;
+const int frames = device->buffer_size / SDL_AUDIO_FRAMESIZE(device->spec);
// Wait for the audio to drain. !!! FIXME: don't bother waiting if device is lost.
-SDL_Delay(((samples * 1000) / device->spec.freq) * 2);
+SDL_Delay(((frames * 1000) / device->spec.freq) * 2);
current_audio.impl.ThreadDeinit(device);
SDL_AudioThreadFinalize(device);
}
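
To make the drain wait above concrete, here is the same arithmetic with assumed values (an 8192-byte device buffer of stereo float, so a frame size of 8 bytes, at 48000 Hz; none of these numbers come from the commit):

/* Sketch only: assumed buffer_size = 8192, SDL_AUDIO_FRAMESIZE == 8, freq = 48000. */
static int ExampleDrainWaitMs(void)
{
    const int frames = 8192 / 8;            /* 1024 sample frames in the device buffer          */
    return ((frames * 1000) / 48000) * 2;   /* integer math: (1024000 / 48000) * 2 = 21 * 2 = 42 ms */
}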
@ -1261,7 +1261,7 @@ static int GetDefaultSampleFramesFromFreq(int freq)
void SDL_UpdatedAudioDeviceFormat(SDL_AudioDevice *device)
{
device->silence_value = SDL_GetSilenceValueForFormat(device->spec.format);
-device->buffer_size = device->sample_frames * SDL_AUDIO_BYTESIZE(device->spec.format) * device->spec.channels;
+device->buffer_size = device->sample_frames * SDL_AUDIO_FRAMESIZE(device->spec);
device->work_buffer_size = device->sample_frames * sizeof (float) * device->spec.channels;
device->work_buffer_size = SDL_max(device->buffer_size, device->work_buffer_size); // just in case we end up with a 64-bit audio format at some point.
}
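
With similarly assumed values (1024 sample frames of stereo 16-bit audio; not taken from the commit), the two sizes computed above work out as in this sketch:

#include <SDL3/SDL.h>

/* Sketch: 1024 frames, stereo SDL_AUDIO_S16 -> SDL_AUDIO_FRAMESIZE == 4. */
static void ExampleDeviceBufferSizes(void)
{
    const int sample_frames = 1024;
    const int buffer_size = sample_frames * 4;                        /* 4096 bytes in the device format    */
    int work_buffer_size = sample_frames * (int)sizeof(float) * 2;    /* 8192 bytes when processed as float */
    work_buffer_size = SDL_max(buffer_size, work_buffer_size);        /* 8192: the float path needs more    */
    SDL_Log("buffer=%d, work buffer=%d", buffer_size, work_buffer_size);
}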

View File

@ -1056,11 +1056,6 @@ static int CalculateMaxFrameSize(SDL_AudioFormat src_format, int src_channels, S
return max_format_size * max_channels;
}
-static int GetAudioSpecFrameSize(const SDL_AudioSpec* spec)
-{
-return SDL_AUDIO_BYTESIZE(spec->format) * spec->channels;
-}
static Sint64 GetStreamResampleRate(SDL_AudioStream* stream, int src_freq)
{
src_freq = (int)((float)src_freq * stream->freq_ratio);
@ -1070,7 +1065,7 @@ static Sint64 GetStreamResampleRate(SDL_AudioStream* stream, int src_freq)
static int ResetHistoryBuffer(SDL_AudioStream *stream, const SDL_AudioSpec *spec)
{
-const size_t history_buffer_allocation = GetHistoryBufferSampleFrames() * GetAudioSpecFrameSize(spec);
+const size_t history_buffer_allocation = GetHistoryBufferSampleFrames() * SDL_AUDIO_FRAMESIZE(*spec);
Uint8 *history_buffer = stream->history_buffer;
if (stream->history_buffer_allocation < history_buffer_allocation) {
@ -1313,7 +1308,7 @@ int SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
return -1;
}
-if ((len % GetAudioSpecFrameSize(&stream->src_spec)) != 0) {
+if ((len % SDL_AUDIO_FRAMESIZE(stream->src_spec)) != 0) {
SDL_UnlockMutex(stream->lock);
return SDL_SetError("Can't add partial sample frames");
}
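
The check above means callers must hand SDL_PutAudioStreamData whole sample frames; SDL_GetAudioStreamData, further down, instead truncates the request to a whole frame. A hedged sketch of frame-aligning a byte count before queueing it (the helper name is an assumption):

#include <SDL3/SDL.h>

/* Hypothetical caller-side helper: queue only complete sample frames. */
static int PutAlignedAudio(SDL_AudioStream *stream, const SDL_AudioSpec *src_spec,
                           const void *buf, int len)
{
    len -= len % SDL_AUDIO_FRAMESIZE(*src_spec);   /* drop any trailing partial frame */
    return (len > 0) ? SDL_PutAudioStreamData(stream, buf, len) : 0;
}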
@ -1398,7 +1393,7 @@ static void UpdateStreamHistoryBuffer(SDL_AudioStream* stream, const SDL_AudioSp
// Even if we aren't currently resampling, we always need to update the history buffer
Uint8 *history_buffer = stream->history_buffer;
-int history_bytes = history_buffer_frames * GetAudioSpecFrameSize(spec);
+int history_bytes = history_buffer_frames * SDL_AUDIO_FRAMESIZE(*spec);
if (left_padding != NULL) {
// Fill in the left padding using the history buffer
@ -1418,7 +1413,7 @@ static void UpdateStreamHistoryBuffer(SDL_AudioStream* stream, const SDL_AudioSp
static Sint64 GetAudioStreamTrackAvailableFrames(SDL_AudioStream* stream, SDL_AudioTrack* track, Sint64 resample_offset)
{
-size_t input_frames = track->queued_bytes / GetAudioSpecFrameSize(&track->spec);
+size_t input_frames = track->queued_bytes / SDL_AUDIO_FRAMESIZE(track->spec);
Sint64 resample_rate = GetStreamResampleRate(stream, track->spec.freq);
Sint64 output_frames = (Sint64) input_frames;
@ -1460,7 +1455,7 @@ static int GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int ou
const SDL_AudioFormat src_format = src_spec->format;
const int src_channels = src_spec->channels;
-const int src_frame_size = GetAudioSpecFrameSize(src_spec);
+const int src_frame_size = SDL_AUDIO_FRAMESIZE(*src_spec);
const SDL_AudioFormat dst_format = dst_spec->format;
const int dst_channels = dst_spec->channels;
@ -1646,7 +1641,7 @@ int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
return -1;
}
-const int dst_frame_size = GetAudioSpecFrameSize(&stream->dst_spec);
+const int dst_frame_size = SDL_AUDIO_FRAMESIZE(stream->dst_spec);
len -= len % dst_frame_size; // chop off any fractional sample frame.
@ -1663,7 +1658,7 @@ int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
approx_request = GetResamplerNeededInputFrames((int) approx_request, resample_rate, 0);
}
-approx_request *= GetAudioSpecFrameSize(&stream->src_spec); // convert sample frames to bytes.
+approx_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec); // convert sample frames to bytes.
if (approx_request > 0) { // don't call the callback if we can satisfy this request with existing data.
stream->get_callback(stream->get_callback_userdata, stream, (int) SDL_min(approx_request, SDL_INT_MAX));
@ -1749,7 +1744,7 @@ int SDL_GetAudioStreamAvailable(SDL_AudioStream *stream)
Sint64 count = GetAudioStreamAvailableFrames(stream);
// convert from sample frames to bytes in destination format.
-count *= GetAudioSpecFrameSize(&stream->dst_spec);
+count *= SDL_AUDIO_FRAMESIZE(stream->dst_spec);
SDL_UnlockMutex(stream->lock);
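
Because SDL_GetAudioStreamAvailable reports bytes in the destination format, the macro also gives callers a direct way to convert that count back into frames or seconds; a sketch with an assumed helper name:

#include <SDL3/SDL.h>

/* Hypothetical helper: seconds of audio currently queued, in the stream's output format. */
static float QueuedSeconds(SDL_AudioStream *stream)
{
    SDL_AudioSpec dst_spec;
    if (SDL_GetAudioStreamFormat(stream, NULL, &dst_spec) < 0) {
        return 0.0f;
    }
    const int frames = SDL_GetAudioStreamAvailable(stream) / SDL_AUDIO_FRAMESIZE(dst_spec);
    return (float)frames / (float)dst_spec.freq;
}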

View File

@ -355,8 +355,7 @@ static int ALSA_PlayDevice(SDL_AudioDevice *device, const Uint8 *buffer, int buf
{
SDL_assert(buffer == device->hidden->mixbuf);
Uint8 *sample_buf = device->hidden->mixbuf;
-const int frame_size = SDL_AUDIO_BYTESIZE(device->spec.format) *
-device->spec.channels;
+const int frame_size = SDL_AUDIO_FRAMESIZE(device->spec);
snd_pcm_uframes_t frames_left = (snd_pcm_uframes_t) (buflen / frame_size);
device->hidden->swizzle_func(device, sample_buf, frames_left);
@ -402,8 +401,7 @@ static Uint8 *ALSA_GetDeviceBuf(SDL_AudioDevice *device, int *buffer_size)
static int ALSA_CaptureFromDevice(SDL_AudioDevice *device, void *buffer, int buflen)
{
Uint8 *sample_buf = (Uint8 *)buffer;
-const int frame_size = SDL_AUDIO_BYTESIZE(device->spec.format) *
-device->spec.channels;
+const int frame_size = SDL_AUDIO_FRAMESIZE(device->spec);
const int total_frames = buflen / frame_size;
snd_pcm_uframes_t frames_left = total_frames;

View File

@ -38,7 +38,7 @@ static Uint8 *EMSCRIPTENAUDIO_GetDeviceBuf(SDL_AudioDevice *device, int *buffer_
static int EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buffer, int buffer_size)
{
-const int framelen = SDL_AUDIO_BYTESIZE(device->spec.format) * device->spec.channels;
+const int framelen = SDL_AUDIO_FRAMESIZE(device->spec);
MAIN_THREAD_EM_ASM({
var SDL3 = Module['SDL3'];
var numChannels = SDL3.audio.currentOutputBuffer['numberOfChannels'];

View File

@ -161,7 +161,7 @@ static int N3DSAUDIO_OpenDevice(SDL_AudioDevice *device)
SDL_memset(device->hidden->waveBuf, 0, sizeof(ndspWaveBuf) * NUM_BUFFERS);
-const int sample_frame_size = device->spec.channels * SDL_AUDIO_BYTESIZE(device->spec.format);
+const int sample_frame_size = SDL_AUDIO_FRAMESIZE(device->spec);
for (unsigned i = 0; i < NUM_BUFFERS; i++) {
device->hidden->waveBuf[i].data_vaddr = data_vaddr;
device->hidden->waveBuf[i].nsamples = device->buffer_size / sample_frame_size;

View File

@ -1108,7 +1108,7 @@ static int PIPEWIRE_OpenDevice(SDL_AudioDevice *device)
}
/* Size of a single audio frame in bytes */
-priv->stride = SDL_AUDIO_BYTESIZE(device->spec.format) * device->spec.channels;
+priv->stride = SDL_AUDIO_FRAMESIZE(device->spec);
if (device->sample_frames < min_period) {
device->sample_frames = min_period;

View File

@ -621,7 +621,7 @@ static int mgmtthrtask_PrepDevice(void *userdata)
return -1;
}
-device->hidden->framesize = SDL_AUDIO_BYTESIZE(device->spec.format) * device->spec.channels;
+device->hidden->framesize = SDL_AUDIO_FRAMESIZE(device->spec);
if (device->iscapture) {
IAudioCaptureClient *capture = NULL;

View File

@ -513,7 +513,7 @@ static void StreamThing_ontick(Thing *thing, Uint64 now)
if (!available || (SDL_GetAudioStreamFormat(thing->data.stream.stream, NULL, &spec) < 0)) {
DestroyThingInPoof(thing);
} else {
-const int ticksleft = (int) ((((Uint64) ((available / SDL_AUDIO_BYTESIZE(spec.format)) / spec.channels)) * 1000) / spec.freq);
+const int ticksleft = (int) ((((Uint64) (available / SDL_AUDIO_FRAMESIZE(spec))) * 1000) / spec.freq);
const float pct = thing->data.stream.total_ticks ? (((float) (ticksleft)) / ((float) thing->data.stream.total_ticks)) : 0.0f;
thing->progress = 1.0f - pct;
}
@ -553,7 +553,7 @@ static void StreamThing_ondrop(Thing *thing, int button, float x, float y)
SDL_UnbindAudioStream(thing->data.stream.stream); /* unbind from current device */
if (thing->line_connected_to->what == THING_LOGDEV_CAPTURE) {
SDL_FlushAudioStream(thing->data.stream.stream);
-thing->data.stream.total_ticks = (int) (((((Uint64) (SDL_GetAudioStreamAvailable(thing->data.stream.stream) / SDL_AUDIO_BYTESIZE(spec->format))) / spec->channels) * 1000) / spec->freq);
+thing->data.stream.total_ticks = (int) ((((Uint64) (SDL_GetAudioStreamAvailable(thing->data.stream.stream) / SDL_AUDIO_FRAMESIZE(*spec))) * 1000) / spec->freq);
}
}
@ -596,7 +596,7 @@ static Thing *CreateStreamThing(const SDL_AudioSpec *spec, const Uint8 *buf, con
if (buf && buflen) {
SDL_PutAudioStreamData(thing->data.stream.stream, buf, (int) buflen);
SDL_FlushAudioStream(thing->data.stream.stream);
-thing->data.stream.total_ticks = (int) (((((Uint64) (SDL_GetAudioStreamAvailable(thing->data.stream.stream) / SDL_AUDIO_BYTESIZE(spec->format))) / spec->channels) * 1000) / spec->freq);
+thing->data.stream.total_ticks = (int) ((((Uint64) (SDL_GetAudioStreamAvailable(thing->data.stream.stream) / SDL_AUDIO_FRAMESIZE(*spec))) * 1000) / spec->freq);
}
thing->ontick = StreamThing_ontick;
thing->ondrag = StreamThing_ondrag;

View File

@ -292,7 +292,7 @@ static void loop(void)
if (SDL_GetAudioStreamFormat(stream, &src_spec, &dst_spec) == 0) {
available_bytes = SDL_GetAudioStreamAvailable(stream);
-available_seconds = (float)available_bytes / (float)(SDL_AUDIO_BYTESIZE(dst_spec.format) * dst_spec.freq * dst_spec.channels);
+available_seconds = (float)available_bytes / (float)(SDL_AUDIO_FRAMESIZE(dst_spec) * dst_spec.freq);
/* keep it looping. */
if (auto_loop && (available_seconds < 10.0f)) {

View File

@ -709,13 +709,13 @@ static int audio_convertAudio(void *arg)
Uint8 *dst_buf = NULL, *src_buf = NULL;
int dst_len = 0, src_len = 0, real_dst_len = 0;
int l = 64, m;
-int src_samplesize, dst_samplesize;
+int src_framesize, dst_framesize;
int src_silence, dst_silence;
-src_samplesize = SDL_AUDIO_BYTESIZE(spec1.format) * spec1.channels;
-dst_samplesize = SDL_AUDIO_BYTESIZE(spec2.format) * spec2.channels;
+src_framesize = SDL_AUDIO_FRAMESIZE(spec1);
+dst_framesize = SDL_AUDIO_FRAMESIZE(spec2);
-src_len = l * src_samplesize;
+src_len = l * src_framesize;
SDLTest_Log("Creating dummy sample buffer of %i length (%i bytes)", l, src_len);
src_buf = (Uint8 *)SDL_malloc(src_len);
SDLTest_AssertCheck(src_buf != NULL, "Check src data buffer to convert is not NULL");
@ -726,7 +726,7 @@ static int audio_convertAudio(void *arg)
src_silence = SDL_GetSilenceValueForFormat(spec1.format);
SDL_memset(src_buf, src_silence, src_len);
-dst_len = ((int)((((Sint64)l * spec2.freq) - 1) / spec1.freq) + 1) * dst_samplesize;
+dst_len = ((int)((((Sint64)l * spec2.freq) - 1) / spec1.freq) + 1) * dst_framesize;
dst_buf = (Uint8 *)SDL_malloc(dst_len);
SDLTest_AssertCheck(dst_buf != NULL, "Check dst data buffer to convert is not NULL");
if (dst_buf == NULL) {
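
For reference, the dst_len expression above is a ceiling division: it reserves ceil(l * spec2.freq / spec1.freq) output sample frames, each dst_framesize bytes wide. With assumed rates of spec1.freq = 44100 and spec2.freq = 48000 (the test iterates over several rate pairs) and the test's l = 64 input frames: ((64 * 48000 - 1) / 44100) + 1 = 69 + 1 = 70 frames, so dst_len = 70 * dst_framesize bytes.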