early-access version 2820

Commit: 0530d98957
Parent: 1d9d4902d8
Author: pineappleEA
Date: 2022-07-07 09:40:51 +02:00
243 changed files with 32420 additions and 1084 deletions

View file

@@ -1,7 +1,7 @@
 yuzu emulator early access
 =============
-This is the source code for early-access 2819.
+This is the source code for early-access 2820.
 ## Legal Notice

View file

@@ -139,7 +139,7 @@ if(NOT BUNDLE_SPEEX)
 endif()
 if(NOT TARGET speex)
-add_library(speex STATIC subprojects/speex/resample.c)
+add_library(speex OBJECT subprojects/speex/resample.c)
 set_target_properties(speex PROPERTIES POSITION_INDEPENDENT_CODE TRUE)
 target_include_directories(speex INTERFACE subprojects)
 target_compile_definitions(speex PUBLIC
@@ -259,7 +259,7 @@ if(USE_WASAPI)
 target_sources(cubeb PRIVATE
 src/cubeb_wasapi.cpp)
 target_compile_definitions(cubeb PRIVATE USE_WASAPI)
-target_link_libraries(cubeb PRIVATE avrt ole32)
+target_link_libraries(cubeb PRIVATE avrt ole32 ksuser)
 endif()
 check_include_files("windows.h;mmsystem.h" USE_WINMM)
@@ -284,11 +284,24 @@ if(HAVE_SYS_SOUNDCARD_H)
 try_compile(USE_OSS "${PROJECT_BINARY_DIR}/compile_tests"
 ${PROJECT_SOURCE_DIR}/cmake/compile_tests/oss_is_v4.c)
 if(USE_OSS)
+# strlcpy is not available on BSD systems that use glibc,
+# like Debian kfreebsd, so try using libbsd if available
+include(CheckSymbolExists)
+check_symbol_exists(strlcpy string.h HAVE_STRLCPY)
+if(NOT HAVE_STRLCPY)
+pkg_check_modules(libbsd-overlay IMPORTED_TARGET libbsd-overlay)
+if(libbsd-overlay_FOUND)
+target_link_libraries(cubeb PRIVATE PkgConfig::libbsd-overlay)
+set(HAVE_STRLCPY true)
+endif()
+endif()
+if (HAVE_STRLCPY)
 target_sources(cubeb PRIVATE
 src/cubeb_oss.c)
 target_compile_definitions(cubeb PRIVATE USE_OSS)
 endif()
 endif()
+endif()
 check_include_files(android/log.h USE_AUDIOTRACK)
 if(USE_AUDIOTRACK)
@@ -367,7 +380,7 @@ if(BUILD_TESTS)
 add_executable(test_${NAME} test/test_${NAME}.cpp)
 target_include_directories(test_${NAME} PRIVATE ${gtest_SOURCE_DIR}/include src)
 target_link_libraries(test_${NAME} PRIVATE cubeb gtest_main)
-add_test(${NAME} test_${NAME})
+add_test(${NAME} test_${NAME} --gtest_death_test_style=threadsafe)
 add_sanitizers(test_${NAME})
 install(TARGETS test_${NAME})
 endmacro(cubeb_add_test)

View file

@@ -2,7 +2,7 @@
 You must have CMake v3.1 or later installed.
-1. `git clone --recursive https://github.com/kinetiknz/cubeb.git`
+1. `git clone --recursive https://github.com/mozilla/cubeb.git`
 2. `mkdir cubeb-build`
 3. `cd cubeb-build`
 3. `cmake ../cubeb`

View file

@@ -935,7 +935,8 @@ aaudio_stream_init_impl(cubeb_stream * stm, cubeb_devid input_device,
 stm->resampler = cubeb_resampler_create(
 stm, input_stream_params ? &in_params : NULL,
 output_stream_params ? &out_params : NULL, target_sample_rate,
-stm->data_callback, stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DEFAULT);
+stm->data_callback, stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DEFAULT,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 if (!stm->resampler) {
 LOG("Failed to create resampler");

View file

@@ -1442,6 +1442,13 @@ audiounit_destroy(cubeb * ctx)
 audiounit_active_streams(ctx));
 }
+// Destroying a cubeb context with device collection callbacks registered
+// is misuse of the API, assert then attempt to clean up.
+assert(!ctx->input_collection_changed_callback &&
+!ctx->input_collection_changed_user_ptr &&
+!ctx->output_collection_changed_callback &&
+!ctx->output_collection_changed_user_ptr);
 /* Unregister the callback if necessary. */
 if (ctx->input_collection_changed_callback) {
 audiounit_remove_device_listener(ctx, CUBEB_DEVICE_TYPE_INPUT);
@@ -2700,7 +2707,8 @@ audiounit_setup_stream(cubeb_stream * stm)
 stm->resampler.reset(cubeb_resampler_create(
 stm, has_input(stm) ? &input_unconverted_params : NULL,
 has_output(stm) ? &stm->output_stream_params : NULL, target_sample_rate,
-stm->data_callback, stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP));
+stm->data_callback, stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP,
+CUBEB_RESAMPLER_RECLOCK_NONE));
 if (!stm->resampler) {
 LOG("(%p) Could not create resampler.", stm);
 return CUBEB_ERROR;

View file

@@ -925,15 +925,18 @@ cbjack_stream_init(cubeb * context, cubeb_stream ** stream,
 if (stm->devs == DUPLEX) {
 stm->resampler = cubeb_resampler_create(
 stm, &stm->in_params, &stm->out_params, stream_actual_rate,
-stm->data_callback, stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP);
+stm->data_callback, stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 } else if (stm->devs == IN_ONLY) {
 stm->resampler = cubeb_resampler_create(
 stm, &stm->in_params, nullptr, stream_actual_rate, stm->data_callback,
-stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP);
+stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 } else if (stm->devs == OUT_ONLY) {
 stm->resampler = cubeb_resampler_create(
 stm, nullptr, &stm->out_params, stream_actual_rate, stm->data_callback,
-stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP);
+stm->user_ptr, CUBEB_RESAMPLER_QUALITY_DESKTOP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 }
 if (!stm->resampler) {

View file

@@ -1479,7 +1479,8 @@ opensl_stream_init(cubeb * ctx, cubeb_stream ** stream,
 stm->resampler = cubeb_resampler_create(
 stm, input_stream_params ? &input_params : NULL,
 output_stream_params ? &output_params : NULL, target_sample_rate,
-data_callback, user_ptr, CUBEB_RESAMPLER_QUALITY_DEFAULT);
+data_callback, user_ptr, CUBEB_RESAMPLER_QUALITY_DEFAULT,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 if (!stm->resampler) {
 LOG("Failed to create resampler");
 opensl_stream_destroy(stm);

View file

@@ -96,9 +96,8 @@ struct oss_stream {
 oss_devnode_t name;
 int fd;
 void * buf;
-unsigned int nfr; /* Number of frames allocated */
-unsigned int nfrags;
 unsigned int bufframes;
+unsigned int maxframes;
 struct stream_info {
 int channels;
@@ -824,9 +823,9 @@ retry:
 pfds[0].fd = s->play.fd;
 pfds[1].fd = -1;
 goto retry;
-} else if (tnfr > (long)s->play.bufframes) {
+} else if (tnfr > (long)s->play.maxframes) {
 /* too many frames available - limit */
-tnfr = (long)s->play.bufframes;
+tnfr = (long)s->play.maxframes;
 }
 if (nfr > tnfr) {
 nfr = tnfr;
@@ -842,9 +841,9 @@ retry:
 pfds[0].fd = -1;
 pfds[1].fd = s->record.fd;
 goto retry;
-} else if (tnfr > (long)s->record.bufframes) {
+} else if (tnfr > (long)s->record.maxframes) {
 /* too many frames available - limit */
-tnfr = (long)s->record.bufframes;
+tnfr = (long)s->record.maxframes;
 }
 if (nfr > tnfr) {
 nfr = tnfr;
@@ -1061,6 +1060,8 @@ oss_stream_init(cubeb * context, cubeb_stream ** stream,
 }
 if (input_stream_params != NULL) {
 unsigned int nb_channels;
+uint32_t minframes;
 if (input_stream_params->prefs & CUBEB_STREAM_PREF_LOOPBACK) {
 LOG("Loopback not supported");
 ret = CUBEB_ERROR_NOT_SUPPORTED;
@@ -1089,18 +1090,17 @@ oss_stream_init(cubeb * context, cubeb_stream ** stream,
 (input_stream_params->format == CUBEB_SAMPLE_FLOAT32NE);
 s->record.frame_size =
 s->record.info.channels * (s->record.info.precision / 8);
-s->record.nfrags = OSS_NFRAGS;
-s->record.nfr = latency_frames / OSS_NFRAGS;
-s->record.bufframes = s->record.nfrags * s->record.nfr;
-uint32_t minnfr;
-oss_get_min_latency(context, *input_stream_params, &minnfr);
-if (s->record.nfr < minnfr) {
-s->record.nfr = minnfr;
-s->record.nfrags = latency_frames / minnfr;
+s->record.bufframes = latency_frames;
+oss_get_min_latency(context, *input_stream_params, &minframes);
+if (s->record.bufframes < minframes) {
+s->record.bufframes = minframes;
 }
 }
 if (output_stream_params != NULL) {
 unsigned int nb_channels;
+uint32_t minframes;
 if (output_stream_params->prefs & CUBEB_STREAM_PREF_LOOPBACK) {
 LOG("Loopback not supported");
 ret = CUBEB_ERROR_NOT_SUPPORTED;
@@ -1128,19 +1128,16 @@ oss_stream_init(cubeb * context, cubeb_stream ** stream,
 }
 s->play.floating = (output_stream_params->format == CUBEB_SAMPLE_FLOAT32NE);
 s->play.frame_size = s->play.info.channels * (s->play.info.precision / 8);
-s->play.nfrags = OSS_NFRAGS;
-s->play.nfr = latency_frames / OSS_NFRAGS;
-uint32_t minnfr;
-oss_get_min_latency(context, *output_stream_params, &minnfr);
-if (s->play.nfr < minnfr) {
-s->play.nfr = minnfr;
-s->play.nfrags = latency_frames / minnfr;
+s->play.bufframes = latency_frames;
+oss_get_min_latency(context, *output_stream_params, &minframes);
+if (s->play.bufframes < minframes) {
+s->play.bufframes = minframes;
 }
-s->play.bufframes = s->play.nfrags * s->play.nfr;
 }
 if (s->play.fd != -1) {
 int frag = oss_get_frag_params(
-oss_calc_frag_shift(s->play.nfr, s->play.frame_size));
+oss_calc_frag_shift(s->play.bufframes, s->play.frame_size));
 if (ioctl(s->play.fd, SNDCTL_DSP_SETFRAGMENT, &frag))
 LOG("Failed to set play fd with SNDCTL_DSP_SETFRAGMENT. frag: 0x%x",
 frag);
@@ -1148,19 +1145,28 @@ oss_stream_init(cubeb * context, cubeb_stream ** stream,
 if (ioctl(s->play.fd, SNDCTL_DSP_GETOSPACE, &bi))
 LOG("Failed to get play fd's buffer info.");
 else {
-s->play.nfr = bi.fragsize / s->play.frame_size;
-s->play.nfrags = bi.fragments;
-s->play.bufframes = s->play.nfr * s->play.nfrags;
+s->play.bufframes = (bi.fragsize * bi.fragstotal) / s->play.frame_size;
 }
-int lw = s->play.frame_size;
+int lw;
+/*
+ * Force 32 ms service intervals at most, or when recording is
+ * active, use the recording service intervals as a reference.
+ */
+s->play.maxframes = (32 * output_stream_params->rate) / 1000;
+if (s->record.fd != -1 || s->play.maxframes >= s->play.bufframes) {
+lw = s->play.frame_size; /* Feed data when possible. */
+s->play.maxframes = s->play.bufframes;
+} else {
+lw = (s->play.bufframes - s->play.maxframes) * s->play.frame_size;
+}
 if (ioctl(s->play.fd, SNDCTL_DSP_LOW_WATER, &lw))
 LOG("Audio device \"%s\" (play) could not set trigger threshold",
 s->play.name);
 }
 if (s->record.fd != -1) {
 int frag = oss_get_frag_params(
-oss_calc_frag_shift(s->record.nfr, s->record.frame_size));
+oss_calc_frag_shift(s->record.bufframes, s->record.frame_size));
 if (ioctl(s->record.fd, SNDCTL_DSP_SETFRAGMENT, &frag))
 LOG("Failed to set record fd with SNDCTL_DSP_SETFRAGMENT. frag: 0x%x",
 frag);
@@ -1168,11 +1174,11 @@ oss_stream_init(cubeb * context, cubeb_stream ** stream,
 if (ioctl(s->record.fd, SNDCTL_DSP_GETISPACE, &bi))
 LOG("Failed to get record fd's buffer info.");
 else {
-s->record.nfr = bi.fragsize / s->record.frame_size;
-s->record.nfrags = bi.fragments;
-s->record.bufframes = s->record.nfr * s->record.nfrags;
+s->record.bufframes =
+(bi.fragsize * bi.fragstotal) / s->record.frame_size;
 }
+s->record.maxframes = s->record.bufframes;
 int lw = s->record.frame_size;
 if (ioctl(s->record.fd, SNDCTL_DSP_LOW_WATER, &lw))
 LOG("Audio device \"%s\" (record) could not set trigger threshold",

View file

@@ -783,6 +783,10 @@ pulse_context_destroy(cubeb * ctx)
 static void
 pulse_destroy(cubeb * ctx)
 {
+assert(!ctx->input_collection_changed_callback &&
+!ctx->input_collection_changed_user_ptr &&
+!ctx->output_collection_changed_callback &&
+!ctx->output_collection_changed_user_ptr);
 free(ctx->context_name);
 if (ctx->context) {
 pulse_context_destroy(ctx);
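Both the PulseAudio and AudioUnit backends now assert that no device collection callbacks are still registered when the context is destroyed. A minimal sketch of the teardown order this expects, using the public cubeb API (error handling elided, callback body is a stand-in):

#include "cubeb/cubeb.h"

static void on_devices_changed(cubeb * /*ctx*/, void * /*user*/) {
  // react to device plug/unplug events
}

void lifecycle_example(cubeb * ctx) {
  cubeb_register_device_collection_changed(ctx, CUBEB_DEVICE_TYPE_INPUT,
                                           on_devices_changed, nullptr);
  // ... use the context ...
  // Unregister by passing a null callback for the same device type first;
  // destroying the context with callbacks still registered now asserts.
  cubeb_register_device_collection_changed(ctx, CUBEB_DEVICE_TYPE_INPUT,
                                           nullptr, nullptr);
  cubeb_destroy(ctx);
}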

View file

@@ -323,7 +323,8 @@ cubeb_resampler_create(cubeb_stream * stream,
 cubeb_stream_params * input_params,
 cubeb_stream_params * output_params,
 unsigned int target_rate, cubeb_data_callback callback,
-void * user_ptr, cubeb_resampler_quality quality)
+void * user_ptr, cubeb_resampler_quality quality,
+cubeb_resampler_reclock reclock)
 {
 cubeb_sample_format format;
@@ -337,13 +338,13 @@ cubeb_resampler_create(cubeb_stream * stream,
 switch (format) {
 case CUBEB_SAMPLE_S16NE:
-return cubeb_resampler_create_internal<short>(stream, input_params,
-output_params, target_rate,
-callback, user_ptr, quality);
+return cubeb_resampler_create_internal<short>(
+stream, input_params, output_params, target_rate, callback, user_ptr,
+quality, reclock);
 case CUBEB_SAMPLE_FLOAT32NE:
-return cubeb_resampler_create_internal<float>(stream, input_params,
-output_params, target_rate,
-callback, user_ptr, quality);
+return cubeb_resampler_create_internal<float>(
+stream, input_params, output_params, target_rate, callback, user_ptr,
+quality, reclock);
 default:
 assert(false);
 return nullptr;

View file

@@ -21,6 +21,11 @@ typedef enum {
 CUBEB_RESAMPLER_QUALITY_DESKTOP
 } cubeb_resampler_quality;
+typedef enum {
+CUBEB_RESAMPLER_RECLOCK_NONE,
+CUBEB_RESAMPLER_RECLOCK_INPUT
+} cubeb_resampler_reclock;
 /**
  * Create a resampler to adapt the requested sample rate into something that
  * is accepted by the audio backend.
@@ -44,7 +49,8 @@ cubeb_resampler_create(cubeb_stream * stream,
 cubeb_stream_params * input_params,
 cubeb_stream_params * output_params,
 unsigned int target_rate, cubeb_data_callback callback,
-void * user_ptr, cubeb_resampler_quality quality);
+void * user_ptr, cubeb_resampler_quality quality,
+cubeb_resampler_reclock reclock);
 /**
  * Fill the buffer with frames acquired using the data callback. Resampling will
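Every backend call site of the internal cubeb_resampler_create now passes the extra cubeb_resampler_reclock argument; in this commit it is always CUBEB_RESAMPLER_RECLOCK_NONE, with CUBEB_RESAMPLER_RECLOCK_INPUT reserved for reclocking against the input stream. A hedged sketch of the updated call shape, mirroring the backend changes above (the data callback is a stand-in, and the include path assumes cubeb's internal src/ header):

#include "cubeb_resampler.h"  // internal cubeb header, as used by the backends

static long fill_cb(cubeb_stream *, void * /*user*/, const void * /*in*/,
                    void * /*out*/, long nframes) {
  return nframes;  // stand-in data callback: pretend we produced nframes
}

cubeb_resampler * make_output_resampler(cubeb_stream * stm,
                                        cubeb_stream_params * out_params,
                                        unsigned int target_rate) {
  // Same pattern as the aaudio/opensl/wasapi call sites in this commit:
  // quality as before, plus the new trailing reclock argument.
  return cubeb_resampler_create(stm, /*input_params=*/nullptr, out_params,
                                target_rate, fill_cb, /*user_ptr=*/nullptr,
                                CUBEB_RESAMPLER_QUALITY_DEFAULT,
                                CUBEB_RESAMPLER_RECLOCK_NONE);
}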

View file

@@ -496,7 +496,8 @@ cubeb_resampler_create_internal(cubeb_stream * stream,
 cubeb_stream_params * output_params,
 unsigned int target_rate,
 cubeb_data_callback callback, void * user_ptr,
-cubeb_resampler_quality quality)
+cubeb_resampler_quality quality,
+cubeb_resampler_reclock reclock)
 {
 std::unique_ptr<cubeb_resampler_speex_one_way<T>> input_resampler = nullptr;
 std::unique_ptr<cubeb_resampler_speex_one_way<T>> output_resampler = nullptr;

View file

@@ -11,24 +11,23 @@
 #include "cubeb-internal.h"
 #include <windows.h>
-/* This wraps a critical section to track the owner in debug mode, adapted from
+/* This wraps an SRWLock to track the owner in debug mode, adapted from
    NSPR and http://blogs.msdn.com/b/oldnewthing/archive/2013/07/12/10433554.aspx
 */
 class owned_critical_section {
 public:
 owned_critical_section()
+: srwlock(SRWLOCK_INIT)
 #ifndef NDEBUG
-: owner(0)
+,
+owner(0)
 #endif
 {
-InitializeCriticalSection(&critical_section);
 }
-~owned_critical_section() { DeleteCriticalSection(&critical_section); }
 void lock()
 {
-EnterCriticalSection(&critical_section);
+AcquireSRWLockExclusive(&srwlock);
 #ifndef NDEBUG
 XASSERT(owner != GetCurrentThreadId() && "recursive locking");
 owner = GetCurrentThreadId();
@@ -41,7 +40,7 @@ public:
 /* GetCurrentThreadId cannot return 0: it is not a the valid thread id */
 owner = 0;
 #endif
-LeaveCriticalSection(&critical_section);
+ReleaseSRWLockExclusive(&srwlock);
 }
 /* This is guaranteed to have the good behaviour if it succeeds. The behaviour
@@ -55,12 +54,12 @@ public:
 }
 private:
-CRITICAL_SECTION critical_section;
+SRWLOCK srwlock;
 #ifndef NDEBUG
 DWORD owner;
 #endif
-// Disallow copy and assignment because CRICICAL_SECTION cannot be copied.
+// Disallow copy and assignment because SRWLock cannot be copied.
 owned_critical_section(const owned_critical_section &);
 owned_critical_section & operator=(const owned_critical_section &);
 };
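The lock wrapper above switches from a CRITICAL_SECTION to a slim reader/writer lock: SRWLOCK needs no explicit initialization call or destruction, but it is not recursive, which is why the debug-only owner check remains. A standalone sketch of the same pattern, assuming only the Win32 API (this is not the cubeb class itself):

#include <windows.h>

class srw_mutex {
public:
  srw_mutex() : srwlock_(SRWLOCK_INIT) {}   // no Initialize/Delete calls needed
  void lock() { AcquireSRWLockExclusive(&srwlock_); }
  void unlock() { ReleaseSRWLockExclusive(&srwlock_); }
private:
  SRWLOCK srwlock_;
  srw_mutex(const srw_mutex &);             // non-copyable, like the original
  srw_mutex & operator=(const srw_mutex &);
};

int main() {
  srw_mutex m;
  m.lock();
  // SRW locks are not recursive: locking again here would deadlock,
  // which is exactly what the NDEBUG owner bookkeeping in cubeb catches.
  m.unlock();
  return 0;
}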

View file

@@ -183,6 +183,46 @@ private:
 extern cubeb_ops const wasapi_ops;
+static com_heap_ptr<wchar_t>
+wasapi_get_default_device_id(EDataFlow flow, ERole role,
+IMMDeviceEnumerator * enumerator);
+struct wasapi_default_devices {
+wasapi_default_devices(IMMDeviceEnumerator * enumerator)
+: render_console_id(
+wasapi_get_default_device_id(eRender, eConsole, enumerator)),
+render_comms_id(
+wasapi_get_default_device_id(eRender, eCommunications, enumerator)),
+capture_console_id(
+wasapi_get_default_device_id(eCapture, eConsole, enumerator)),
+capture_comms_id(
+wasapi_get_default_device_id(eCapture, eCommunications, enumerator))
+{
+}
+bool is_default(EDataFlow flow, ERole role, wchar_t const * id)
+{
+wchar_t const * default_id = nullptr;
+if (flow == eRender && role == eConsole) {
+default_id = this->render_console_id.get();
+} else if (flow == eRender && role == eCommunications) {
+default_id = this->render_comms_id.get();
+} else if (flow == eCapture && role == eConsole) {
+default_id = this->capture_console_id.get();
+} else if (flow == eCapture && role == eCommunications) {
+default_id = this->capture_comms_id.get();
+}
+return default_id && wcscmp(id, default_id) == 0;
+}
+private:
+com_heap_ptr<wchar_t> render_console_id;
+com_heap_ptr<wchar_t> render_comms_id;
+com_heap_ptr<wchar_t> capture_console_id;
+com_heap_ptr<wchar_t> capture_comms_id;
+};
 int
 wasapi_stream_stop(cubeb_stream * stm);
 int
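The new wasapi_default_devices helper caches the ids of the four default endpoints (render/capture, console/communications) once per enumeration, so wasapi_create_device can compare against cached ids instead of calling GetDefaultAudioEndpoint for every device. A rough sketch of the underlying Win32 pattern it wraps, assuming an already obtained IMMDeviceEnumerator (illustrative only, names are mine):

#include <mmdeviceapi.h>
#include <objbase.h>
#include <wchar.h>

// Returns true when `id` names the default console render device.
// wasapi_default_devices::is_default() does the same comparison, but
// against ids it fetched once up front instead of per call.
bool is_default_render_console(IMMDeviceEnumerator * enumerator,
                               wchar_t const * id) {
  IMMDevice * dev = nullptr;
  if (FAILED(enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &dev))) {
    return false;
  }
  wchar_t * default_id = nullptr;
  bool match = false;
  if (SUCCEEDED(dev->GetId(&default_id))) {
    match = wcscmp(id, default_id) == 0;
    CoTaskMemFree(default_id);  // GetId allocates with the COM allocator
  }
  dev->Release();
  return match;
}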
@@ -195,7 +235,8 @@ ERole
 pref_to_role(cubeb_stream_prefs param);
 int
 wasapi_create_device(cubeb * ctx, cubeb_device_info & ret,
-IMMDeviceEnumerator * enumerator, IMMDevice * dev);
+IMMDeviceEnumerator * enumerator, IMMDevice * dev,
+wasapi_default_devices * defaults);
 void
 wasapi_destroy_device(cubeb_device_info * device_info);
 static int
@@ -216,6 +257,7 @@ class monitor_device_notifications;
 struct cubeb {
 cubeb_ops const * ops = &wasapi_ops;
+owned_critical_section lock;
 cubeb_strings * device_ids;
 /* Device enumerator to get notifications when the
 device collection change. */
@@ -716,6 +758,8 @@ intern_device_id(cubeb * ctx, wchar_t const * id)
 {
 XASSERT(id);
+auto_lock lock(ctx->lock);
 char const * tmp = wstr_to_utf8(id);
 if (!tmp) {
 return nullptr;
@@ -902,16 +946,16 @@ refill(cubeb_stream * stm, void * input_buffer, long input_frames_count,
 return out_frames;
 }
-int
+bool
 trigger_async_reconfigure(cubeb_stream * stm)
 {
 XASSERT(stm && stm->reconfigure_event);
+LOG("Try reconfiguring the stream");
 BOOL ok = SetEvent(stm->reconfigure_event);
 if (!ok) {
 LOG("SetEvent on reconfigure_event failed: %lx", GetLastError());
-return CUBEB_ERROR;
 }
-return CUBEB_OK;
+return static_cast<bool>(ok);
 }
 /* This helper grabs all the frames available from a capture client, put them in
@@ -940,8 +984,16 @@ get_input_buffer(cubeb_stream * stm)
 if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
 // Application can recover from this error. More info
 // https://msdn.microsoft.com/en-us/library/windows/desktop/dd316605(v=vs.85).aspx
-LOG("Device invalidated error, reset default device");
-trigger_async_reconfigure(stm);
+LOG("Input device invalidated error");
+// No need to reset device if user asks to use particular device, or
+// switching is disabled.
+if (stm->input_device_id ||
+(stm->input_stream_params.prefs &
+CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING) ||
+!trigger_async_reconfigure(stm)) {
+stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_ERROR);
+return false;
+}
 return true;
 }
@@ -1046,8 +1098,16 @@ get_output_buffer(cubeb_stream * stm, void *& buffer, size_t & frame_count)
 if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
 // Application can recover from this error. More info
 // https://msdn.microsoft.com/en-us/library/windows/desktop/dd316605(v=vs.85).aspx
-LOG("Device invalidated error, reset default device");
-trigger_async_reconfigure(stm);
+LOG("Output device invalidated error");
+// No need to reset device if user asks to use particular device, or
+// switching is disabled.
+if (stm->output_device_id ||
+(stm->output_stream_params.prefs &
+CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING) ||
+!trigger_async_reconfigure(stm)) {
+stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_ERROR);
+return false;
+}
 return true;
 }
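With the change above, an invalidated device only triggers the silent reconfigure path when the stream is following the system default; if a specific device was requested or CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING is set, the stream now reports CUBEB_STATE_ERROR instead. A hedged sketch of how a client opts out of automatic device switching through the public API (parameter values are placeholders):

#include "cubeb/cubeb.h"

static long data_cb(cubeb_stream *, void *, const void *, void *, long n) { return n; }
static void state_cb(cubeb_stream *, void *, cubeb_state) {}

int open_pinned_output(cubeb * ctx, cubeb_stream ** stream) {
  cubeb_stream_params out_params = {};
  out_params.format = CUBEB_SAMPLE_FLOAT32NE;
  out_params.rate = 48000;
  out_params.channels = 2;
  out_params.layout = CUBEB_LAYOUT_STEREO;
  // With this pref, an AUDCLNT_E_DEVICE_INVALIDATED on the WASAPI backend now
  // surfaces CUBEB_STATE_ERROR instead of silently moving to the default device.
  out_params.prefs = CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING;
  return cubeb_stream_init(ctx, stream, "example", nullptr, nullptr,
                           nullptr, &out_params, /*latency_frames=*/1024,
                           data_cb, state_cb, nullptr);
}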
@@ -1397,7 +1457,7 @@ wasapi_destroy(cubeb * context);
 HRESULT
 register_notification_client(cubeb_stream * stm)
 {
-XASSERT(stm->device_enumerator);
+XASSERT(stm->device_enumerator && !stm->notification_client);
 stm->notification_client.reset(new wasapi_endpoint_notification_client(
 stm->reconfigure_event, stm->role));
@@ -1415,7 +1475,7 @@ register_notification_client(cubeb_stream * stm)
 HRESULT
 unregister_notification_client(cubeb_stream * stm)
 {
-XASSERT(stm->device_enumerator);
+XASSERT(stm->device_enumerator && stm->notification_client);
 HRESULT hr = stm->device_enumerator->UnregisterEndpointNotificationCallback(
 stm->notification_client.get());
@@ -1454,6 +1514,9 @@ get_endpoint(com_ptr<IMMDevice> & device, LPCWSTR devid)
 HRESULT
 register_collection_notification_client(cubeb * context)
 {
+context->lock.assert_current_thread_owns();
+XASSERT(!context->device_collection_enumerator &&
+!context->collection_notification_client);
 HRESULT hr = CoCreateInstance(
 __uuidof(MMDeviceEnumerator), NULL, CLSCTX_INPROC_SERVER,
 IID_PPV_ARGS(context->device_collection_enumerator.receive()));
@@ -1480,6 +1543,9 @@ register_collection_notification_client(cubeb * context)
 HRESULT
 unregister_collection_notification_client(cubeb * context)
 {
+context->lock.assert_current_thread_owns();
+XASSERT(context->device_collection_enumerator &&
+context->collection_notification_client);
 HRESULT hr = context->device_collection_enumerator
 ->UnregisterEndpointNotificationCallback(
 context->collection_notification_client.get());
@@ -1608,6 +1674,7 @@ wasapi_init(cubeb ** context, char const * context_name)
 cubeb * ctx = new cubeb();
 ctx->ops = &wasapi_ops;
+auto_lock lock(ctx->lock);
 if (cubeb_strings_init(&ctx->device_ids) != CUBEB_OK) {
 delete ctx;
 return CUBEB_ERROR;
@@ -1682,6 +1749,10 @@ stop_and_join_render_thread(cubeb_stream * stm)
 void
 wasapi_destroy(cubeb * context)
 {
+auto_lock lock(context->lock);
+XASSERT(!context->device_collection_enumerator &&
+!context->collection_notification_client);
 if (context->device_ids) {
 cubeb_strings_destroy(context->device_ids);
 }
@@ -1887,7 +1958,7 @@ handle_channel_layout(cubeb_stream * stm, EDataFlow direction,
 }
 }
-static bool
+static int
 initialize_iaudioclient2(com_ptr<IAudioClient> & audio_client)
 {
 com_ptr<IAudioClient2> audio_client2;
@@ -1910,8 +1981,8 @@ initialize_iaudioclient2(com_ptr<IAudioClient> & audio_client)
 return CUBEB_OK;
 }
-// Not static to suppress a warning.
-/* static */ bool
+#if 0
+bool
 initialize_iaudioclient3(com_ptr<IAudioClient> & audio_client,
 cubeb_stream * stm,
 const com_heap_ptr<WAVEFORMATEX> & mix_format,
@@ -2021,6 +2092,7 @@ initialize_iaudioclient3(com_ptr<IAudioClient> & audio_client,
 LOG("Could not initialize shared stream with IAudioClient3: error: %lx", hr);
 return false;
 }
+#endif
 #define DIRECTION_NAME (direction == eCapture ? "capture" : "render")
@@ -2035,6 +2107,8 @@ setup_wasapi_stream_one_side(stm,
 cubeb_stream_params * mix_params,
 com_ptr<IMMDevice> & device)
 {
+XASSERT(direction == eCapture || direction == eRender);
 HRESULT hr;
 bool is_loopback = stream_params->prefs & CUBEB_STREAM_PREF_LOOPBACK;
 if (is_loopback && direction != eCapture) {
@@ -2043,6 +2117,10 @@ setup_wasapi_stream_one_side(stm,
 }
 stm->stream_reset_lock.assert_current_thread_owns();
+// If user doesn't specify a particular device, we can choose another one when
+// the given devid is unavailable.
+bool allow_fallback =
+direction == eCapture ? !stm->input_device_id : !stm->output_device_id;
 bool try_again = false;
 // This loops until we find a device that works, or we've exhausted all
 // possibilities.
@@ -2092,7 +2170,7 @@ setup_wasapi_stream_one_side(stm,
 DIRECTION_NAME, hr);
 // A particular device can't be activated because it has been
 // unplugged, try fall back to the default audio device.
-if (devid && hr == AUDCLNT_E_DEVICE_INVALIDATED) {
+if (devid && hr == AUDCLNT_E_DEVICE_INVALIDATED && allow_fallback) {
 LOG("Trying again with the default %s audio device.", DIRECTION_NAME);
 devid = nullptr;
 device = nullptr;
@@ -2169,40 +2247,37 @@ setup_wasapi_stream_one_side(stm,
 flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
 }
+// Sanity check the latency, it may be that the device doesn't support it.
+REFERENCE_TIME minimum_period;
+REFERENCE_TIME default_period;
+hr = audio_client->GetDevicePeriod(&default_period, &minimum_period);
+if (FAILED(hr)) {
+LOG("Could not get device period: %lx", hr);
+return CUBEB_ERROR;
+}
 REFERENCE_TIME latency_hns = frames_to_hns(stream_params->rate, stm->latency);
-// Adjust input latency and check if input is using bluetooth handsfree
-// protocol.
-if (direction == eCapture) {
 stm->input_bluetooth_handsfree = false;
+wasapi_default_devices default_devices(stm->device_enumerator.get());
 cubeb_device_info device_info;
 if (wasapi_create_device(stm->context, device_info,
-stm->device_enumerator.get(),
-device.get()) == CUBEB_OK) {
+stm->device_enumerator.get(), device.get(),
+&default_devices) == CUBEB_OK) {
-// This multiplicator has been found empirically.
-XASSERT(device_info.latency_hi > 0);
-uint32_t latency_frames = device_info.latency_hi * 8;
-LOG("Input: latency increased to %u frames from a default of %u",
-latency_frames, device_info.latency_hi);
-latency_hns = frames_to_hns(device_info.default_rate, latency_frames);
 const char * HANDSFREE_TAG = "BTHHFENUM";
 size_t len = sizeof(HANDSFREE_TAG);
+if (direction == eCapture) {
+uint32_t default_period_frames =
+hns_to_frames(device_info.default_rate, default_period);
 if (strlen(device_info.group_id) >= len &&
 strncmp(device_info.group_id, HANDSFREE_TAG, len) == 0) {
+LOG("Input device is using bluetooth handsfree protocol");
 stm->input_bluetooth_handsfree = true;
 }
+// This multiplicator has been found empirically.
+uint32_t latency_frames = default_period_frames * 8;
+LOG("Input: latency increased to %u frames from a default of %u",
+latency_frames, default_period_frames);
+latency_hns = frames_to_hns(device_info.default_rate, latency_frames);
+}
 wasapi_destroy_device(&device_info);
 } else {
-LOG("Could not get cubeb_device_info.");
+LOG("Could not get cubeb_device_info. Skip customizing input settings");
 }
-}
 if (stream_params->prefs & CUBEB_STREAM_PREF_RAW) {
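The input-latency adjustment above now starts from the device's default period (via GetDevicePeriod) rather than latency_hi, and the 8x factor is described as empirical. As a worked example with illustrative numbers only: a common 10 ms default period at 48 kHz is 480 frames, so the capture latency becomes 480 * 8 = 3840 frames, about 80 ms:

#include <cstdint>
#include <cstdio>

int main() {
  // REFERENCE_TIME is in 100-ns units; 10 ms = 100000 such units (example value).
  const int64_t default_period_hns = 100000;
  const uint32_t rate = 48000;  // example device rate
  // Same hns-to-frames conversion as cubeb_wasapi.cpp's hns_to_frames().
  uint32_t default_period_frames =
      static_cast<uint32_t>(default_period_hns * rate / 10000000);
  uint32_t latency_frames = default_period_frames * 8;  // empirical factor
  std::printf("%u frames (~%u ms)\n", latency_frames,
              latency_frames * 1000 / rate);  // prints: 3840 frames (~80 ms)
  return 0;
}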
@@ -2258,8 +2333,10 @@ setup_wasapi_stream_one_side(stm,
 #undef DIRECTION_NAME
-void
-wasapi_find_matching_output_device(cubeb_stream * stm)
+// Returns a non-null cubeb_devid if we find a matched device, or nullptr
+// otherwise.
+cubeb_devid
+wasapi_find_bt_handsfree_output_device(cubeb_stream * stm)
 {
 HRESULT hr;
 cubeb_device_info * input_device = nullptr;
@@ -2268,19 +2345,21 @@ wasapi_find_matching_output_device(stm)
 // Only try to match to an output device if the input device is a bluetooth
 // device that is using the handsfree protocol
 if (!stm->input_bluetooth_handsfree) {
-return;
+return nullptr;
 }
 wchar_t * tmp = nullptr;
 hr = stm->input_device->GetId(&tmp);
 if (FAILED(hr)) {
-LOG("Couldn't get input device id in wasapi_find_matching_output_device");
-return;
+LOG("Couldn't get input device id in "
+"wasapi_find_bt_handsfree_output_device");
+return nullptr;
 }
 com_heap_ptr<wchar_t> device_id(tmp);
-cubeb_devid input_device_id = intern_device_id(stm->context, device_id.get());
+cubeb_devid input_device_id = reinterpret_cast<cubeb_devid>(
+intern_device_id(stm->context, device_id.get()));
 if (!input_device_id) {
-return;
+return nullptr;
 }
 int rv = wasapi_enumerate_devices(
@@ -2288,7 +2367,7 @@ wasapi_find_matching_output_device(stm)
 (cubeb_device_type)(CUBEB_DEVICE_TYPE_INPUT | CUBEB_DEVICE_TYPE_OUTPUT),
 &collection);
 if (rv != CUBEB_OK) {
-return;
+return nullptr;
 }
 // Find the input device, and then find the output device with the same group
@@ -2300,19 +2379,36 @@ wasapi_find_matching_output_device(stm)
 }
 }
+cubeb_devid matched_output = nullptr;
+if (input_device) {
 for (uint32_t i = 0; i < collection.count; i++) {
 cubeb_device_info & dev = collection.device[i];
-if (dev.type == CUBEB_DEVICE_TYPE_OUTPUT && dev.group_id && input_device &&
+if (dev.type == CUBEB_DEVICE_TYPE_OUTPUT && dev.group_id &&
 !strcmp(dev.group_id, input_device->group_id) &&
 dev.default_rate == input_device->default_rate) {
 LOG("Found matching device for %s: %s", input_device->friendly_name,
 dev.friendly_name);
-stm->output_device_id =
-utf8_to_wstr(reinterpret_cast<char const *>(dev.devid));
+matched_output = dev.devid;
+break;
+}
 }
 }
 wasapi_device_collection_destroy(stm->context, &collection);
+return matched_output;
+}
+std::unique_ptr<wchar_t[]>
+copy_wide_string(const wchar_t * src)
+{
+XASSERT(src);
+size_t len = wcslen(src);
+std::unique_ptr<wchar_t[]> copy(new wchar_t[len + 1]);
+if (wcsncpy_s(copy.get(), len + 1, src, len) != 0) {
+return nullptr;
+}
+return copy;
 }
 int
@@ -2325,6 +2421,17 @@ setup_wasapi_stream(stm)
 XASSERT((!stm->output_client || !stm->input_client) &&
 "WASAPI stream already setup, close it first.");
+std::unique_ptr<const wchar_t[]> selected_output_device_id;
+if (stm->output_device_id) {
+if (std::unique_ptr<wchar_t[]> tmp =
+move(copy_wide_string(stm->output_device_id.get()))) {
+selected_output_device_id = move(tmp);
+} else {
+LOG("Failed to copy output device identifier.");
+return CUBEB_ERROR;
+}
+}
 if (has_input(stm)) {
 LOG("(%p) Setup capture: device=%p", stm, stm->input_device_id.get());
 rv = setup_wasapi_stream_one_side(
@@ -2356,8 +2463,12 @@ setup_wasapi_stream(stm)
 // device, and the default device is the same bluetooth device, pick the
 // right output device, running at the same rate and with the same protocol
 // as the input.
-if (!stm->output_device_id) {
-wasapi_find_matching_output_device(stm);
+if (!selected_output_device_id) {
+cubeb_devid matched = wasapi_find_bt_handsfree_output_device(stm);
+if (matched) {
+selected_output_device_id =
+move(utf8_to_wstr(reinterpret_cast<char const *>(matched)));
+}
 }
 }
@@ -2371,23 +2482,24 @@ setup_wasapi_stream(stm)
 stm->output_stream_params.channels = stm->input_stream_params.channels;
 stm->output_stream_params.layout = stm->input_stream_params.layout;
 if (stm->input_device_id) {
-size_t len = wcslen(stm->input_device_id.get());
-std::unique_ptr<wchar_t[]> tmp(new wchar_t[len + 1]);
-if (wcsncpy_s(tmp.get(), len + 1, stm->input_device_id.get(), len) != 0) {
+if (std::unique_ptr<wchar_t[]> tmp =
+move(copy_wide_string(stm->input_device_id.get()))) {
+XASSERT(!selected_output_device_id);
+selected_output_device_id = move(tmp);
+} else {
 LOG("Failed to copy device identifier while copying input stream "
 "configuration to output stream configuration to drive loopback.");
 return CUBEB_ERROR;
 }
-stm->output_device_id = move(tmp);
 }
 stm->has_dummy_output = true;
 }
 if (has_output(stm)) {
-LOG("(%p) Setup render: device=%p", stm, stm->output_device_id.get());
+LOG("(%p) Setup render: device=%p", stm, selected_output_device_id.get());
 rv = setup_wasapi_stream_one_side(
-stm, &stm->output_stream_params, stm->output_device_id.get(), eRender,
-__uuidof(IAudioRenderClient), stm->output_client,
+stm, &stm->output_stream_params, selected_output_device_id.get(),
+eRender, __uuidof(IAudioRenderClient), stm->output_client,
 &stm->output_buffer_frame_count, stm->refill_event, stm->render_client,
 &stm->output_mix_params, stm->output_device);
 if (rv != CUBEB_OK) {
@@ -2448,10 +2560,11 @@ setup_wasapi_stream(stm)
 stm->resampler.reset(cubeb_resampler_create(
 stm, has_input(stm) ? &input_params : nullptr,
-has_output(stm) ? &output_params : nullptr, target_sample_rate,
-stm->data_callback, stm->user_ptr,
+has_output(stm) && !stm->has_dummy_output ? &output_params : nullptr,
+target_sample_rate, stm->data_callback, stm->user_ptr,
 stm->voice ? CUBEB_RESAMPLER_QUALITY_VOIP
-: CUBEB_RESAMPLER_QUALITY_DESKTOP));
+: CUBEB_RESAMPLER_QUALITY_DESKTOP,
+CUBEB_RESAMPLER_RECLOCK_NONE));
 if (!stm->resampler) {
 LOG("Could not get a resampler");
 return CUBEB_ERROR;
@@ -2616,12 +2729,15 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
 return rv;
 }
-if (!((input_stream_params ? (input_stream_params->prefs &
-CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING)
-: 0) ||
-(output_stream_params ? (output_stream_params->prefs &
-CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING)
-: 0))) {
+// Follow the system default devices when not specifying devices explicitly
+// and CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING is not set.
+if ((!input_device && input_stream_params &&
+!(input_stream_params->prefs &
+CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING)) ||
+(!output_device && output_stream_params &&
+!(output_stream_params->prefs &
+CUBEB_STREAM_PREF_DISABLE_DEVICE_SWITCHING))) {
+LOG("Follow the system default input or/and output devices");
 HRESULT hr = register_notification_client(stm.get());
 if (FAILED(hr)) {
 /* this is not fatal, we can still play audio, but we won't be able
@@ -3011,31 +3127,30 @@ static com_ptr<IMMDevice> wasapi_get_device_node(
 return ret;
 }
-static BOOL
-wasapi_is_default_device(EDataFlow flow, ERole role, LPCWSTR device_id,
+static com_heap_ptr<wchar_t>
+wasapi_get_default_device_id(EDataFlow flow, ERole role,
 IMMDeviceEnumerator * enumerator)
 {
-BOOL ret = FALSE;
 com_ptr<IMMDevice> dev;
-HRESULT hr;
-hr = enumerator->GetDefaultAudioEndpoint(flow, role, dev.receive());
+HRESULT hr = enumerator->GetDefaultAudioEndpoint(flow, role, dev.receive());
 if (SUCCEEDED(hr)) {
 wchar_t * tmp = nullptr;
 if (SUCCEEDED(dev->GetId(&tmp))) {
-com_heap_ptr<wchar_t> defdevid(tmp);
-ret = (wcscmp(defdevid.get(), device_id) == 0);
+com_heap_ptr<wchar_t> devid(tmp);
+return devid;
 }
 }
-return ret;
+return nullptr;
 }
 /* `ret` must be deallocated with `wasapi_destroy_device`, iff the return value
  * of this function is `CUBEB_OK`. */
 int
 wasapi_create_device(cubeb * ctx, cubeb_device_info & ret,
-IMMDeviceEnumerator * enumerator, IMMDevice * dev)
+IMMDeviceEnumerator * enumerator, IMMDevice * dev,
+wasapi_default_devices * defaults)
 {
 com_ptr<IMMEndpoint> endpoint;
 com_ptr<IMMDevice> devnode;
@@ -3046,6 +3161,8 @@ wasapi_create_device(cubeb * ctx, cubeb_device_info & ret,
 REFERENCE_TIME def_period, min_period;
 HRESULT hr;
+XASSERT(enumerator && dev && defaults);
 // zero-out to be able to safely delete the pointers to friendly_name and
 // group_id at all time in this function.
 PodZero(&ret, 1);
@@ -3133,19 +3250,14 @@ wasapi_create_device(cubeb * ctx, cubeb_device_info & ret,
 }
 ret.preferred = CUBEB_DEVICE_PREF_NONE;
-if (wasapi_is_default_device(flow, eConsole, device_id.get(), enumerator)) {
+if (defaults->is_default(flow, eConsole, device_id.get())) {
 ret.preferred =
-(cubeb_device_pref)(ret.preferred | CUBEB_DEVICE_PREF_MULTIMEDIA);
-}
-if (wasapi_is_default_device(flow, eCommunications, device_id.get(),
-enumerator)) {
+(cubeb_device_pref)(ret.preferred | CUBEB_DEVICE_PREF_MULTIMEDIA |
+CUBEB_DEVICE_PREF_NOTIFICATION);
+} else if (defaults->is_default(flow, eCommunications, device_id.get())) {
 ret.preferred =
 (cubeb_device_pref)(ret.preferred | CUBEB_DEVICE_PREF_VOICE);
 }
-if (wasapi_is_default_device(flow, eConsole, device_id.get(), enumerator)) {
-ret.preferred =
-(cubeb_device_pref)(ret.preferred | CUBEB_DEVICE_PREF_NOTIFICATION);
-}
 if (flow == eRender) {
 ret.type = CUBEB_DEVICE_TYPE_OUTPUT;
@@ -3229,14 +3341,17 @@ wasapi_enumerate_devices(cubeb * context, cubeb_device_type type,
 return CUBEB_ERROR;
 }
-if (type == CUBEB_DEVICE_TYPE_OUTPUT)
+wasapi_default_devices default_devices(enumerator.get());
+if (type == CUBEB_DEVICE_TYPE_OUTPUT) {
 flow = eRender;
-else if (type == CUBEB_DEVICE_TYPE_INPUT)
+} else if (type == CUBEB_DEVICE_TYPE_INPUT) {
 flow = eCapture;
-else if (type & (CUBEB_DEVICE_TYPE_INPUT | CUBEB_DEVICE_TYPE_OUTPUT))
+} else if (type & (CUBEB_DEVICE_TYPE_INPUT | CUBEB_DEVICE_TYPE_OUTPUT)) {
 flow = eAll;
-else
+} else {
 return CUBEB_ERROR;
+}
 hr = enumerator->EnumAudioEndpoints(flow, DEVICE_STATEMASK_ALL,
 collection.receive());
@@ -3264,7 +3379,7 @@ wasapi_enumerate_devices(cubeb * context, cubeb_device_type type,
 continue;
 }
 if (wasapi_create_device(context, devices[out->count], enumerator.get(),
-dev.get()) == CUBEB_OK) {
+dev.get(), &default_devices) == CUBEB_OK) {
 out->count += 1;
 }
 }
@@ -3294,6 +3409,7 @@ wasapi_register_device_collection_changed(
 cubeb_device_collection_changed_callback collection_changed_callback,
 void * user_ptr)
 {
+auto_lock lock(context->lock);
 if (devtype == CUBEB_DEVICE_TYPE_UNKNOWN) {
 return CUBEB_ERROR_INVALID_PARAMETER;
 }

View file

@@ -73,17 +73,6 @@
 #define CUBEB_STREAM_MAX 32
 #define NBUFS 4
-const GUID KSDATAFORMAT_SUBTYPE_PCM = {
-0x00000001,
-0x0000,
-0x0010,
-{0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
-const GUID KSDATAFORMAT_SUBTYPE_IEEE_FLOAT = {
-0x00000003,
-0x0000,
-0x0010,
-{0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}};
 struct cubeb_stream_item {
 SLIST_ENTRY head;
 cubeb_stream * stream;

View file

@@ -137,26 +137,20 @@ void device_collection_changed_callback(cubeb * context, void * user)
 " called when opening a stream";
 }
-TEST(cubeb, duplex_collection_change)
+void
+duplex_collection_change_impl(cubeb * ctx)
 {
-cubeb *ctx;
 cubeb_stream * stream;
 cubeb_stream_params input_params;
 cubeb_stream_params output_params;
 int r;
 uint32_t latency_frames = 0;
-r = common_init(&ctx, "Cubeb duplex example with collection change");
-ASSERT_EQ(r, CUBEB_OK) << "Error initializing cubeb library";
-r = cubeb_register_device_collection_changed(ctx,
-static_cast<cubeb_device_type>(CUBEB_DEVICE_TYPE_INPUT),
-device_collection_changed_callback,
-nullptr);
+r = cubeb_register_device_collection_changed(
+ctx, static_cast<cubeb_device_type>(CUBEB_DEVICE_TYPE_INPUT),
+device_collection_changed_callback, nullptr);
 ASSERT_EQ(r, CUBEB_OK) << "Error initializing cubeb stream";
-std::unique_ptr<cubeb, decltype(&cubeb_destroy)>
-cleanup_cubeb_at_exit(ctx, cubeb_destroy);
 /* typical user-case: mono input, stereo output, low latency. */
 input_params.format = STREAM_FORMAT;
@@ -173,13 +167,43 @@ TEST(cubeb, duplex_collection_change)
 r = cubeb_get_min_latency(ctx, &output_params, &latency_frames);
 ASSERT_EQ(r, CUBEB_OK) << "Could not get minimal latency";
-r = cubeb_stream_init(ctx, &stream, "Cubeb duplex",
-NULL, &input_params, NULL, &output_params,
-latency_frames, data_cb_duplex, state_cb_duplex, nullptr);
+r = cubeb_stream_init(ctx, &stream, "Cubeb duplex", NULL, &input_params, NULL,
+&output_params, latency_frames, data_cb_duplex,
+state_cb_duplex, nullptr);
 ASSERT_EQ(r, CUBEB_OK) << "Error initializing cubeb stream";
 cubeb_stream_destroy(stream);
 }
+TEST(cubeb, duplex_collection_change)
+{
+cubeb * ctx;
+int r;
+r = common_init(&ctx, "Cubeb duplex example with collection change");
+ASSERT_EQ(r, CUBEB_OK) << "Error initializing cubeb library";
+std::unique_ptr<cubeb, decltype(&cubeb_destroy)> cleanup_cubeb_at_exit(
+ctx, cubeb_destroy);
+duplex_collection_change_impl(ctx);
+r = cubeb_register_device_collection_changed(
+ctx, static_cast<cubeb_device_type>(CUBEB_DEVICE_TYPE_INPUT), nullptr,
+nullptr);
+ASSERT_EQ(r, CUBEB_OK);
+}
+TEST(cubeb, duplex_collection_change_no_unregister)
+{
+cubeb * ctx;
+int r;
+r = common_init(&ctx, "Cubeb duplex example with collection change");
+ASSERT_EQ(r, CUBEB_OK) << "Error initializing cubeb library";
+std::unique_ptr<cubeb, decltype(&cubeb_destroy)> cleanup_cubeb_at_exit(
+ctx, [](cubeb * p) noexcept { EXPECT_DEATH(cubeb_destroy(p), ""); });
+duplex_collection_change_impl(ctx);
+}
 long data_cb_input(cubeb_stream * stream, void * user, const void * inputbuffer, void * outputbuffer, long nframes)
 {
 if (stream == NULL || inputbuffer == NULL || outputbuffer != NULL) {
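The new duplex_collection_change_no_unregister test relies on EXPECT_DEATH, which is also why the CMake change earlier in this commit passes --gtest_death_test_style=threadsafe to every test binary. A minimal standalone illustration of that pattern, assuming googletest (this is not code from the commit):

#include <gtest/gtest.h>
#include <cstdlib>

static void destroy_with_callbacks_registered() {
  // Stand-in for cubeb_destroy() hitting the new assert()s in the backends.
  std::abort();
}

TEST(example, dies_on_api_misuse) {
  // With --gtest_death_test_style=threadsafe the framework re-executes the
  // test binary for the death test, which is safer in multi-threaded
  // programs such as these audio tests.
  EXPECT_DEATH(destroy_with_callbacks_registered(), "");
}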

View file

@@ -338,7 +338,8 @@ void test_resampler_duplex(uint32_t input_channels, uint32_t output_channels,
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, &input_params, &output_params, target_rate,
-data_cb_resampler, (void*)&state, CUBEB_RESAMPLER_QUALITY_VOIP);
+data_cb_resampler, (void*)&state, CUBEB_RESAMPLER_QUALITY_VOIP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 long latency = cubeb_resampler_latency(resampler);
@@ -484,8 +485,8 @@ TEST(cubeb, resampler_output_only_noop)
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, nullptr, &output_params, target_rate,
 test_output_only_noop_data_cb, nullptr,
-CUBEB_RESAMPLER_QUALITY_VOIP);
+CUBEB_RESAMPLER_QUALITY_VOIP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 const long out_frames = 128;
 float out_buffer[out_frames];
 long got;
@@ -523,7 +524,8 @@ TEST(cubeb, resampler_drain)
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, nullptr, &output_params, target_rate,
 test_drain_data_cb, &cb_count,
-CUBEB_RESAMPLER_QUALITY_VOIP);
+CUBEB_RESAMPLER_QUALITY_VOIP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 const long out_frames = 128;
 float out_buffer[out_frames];
@@ -572,7 +574,8 @@ TEST(cubeb, resampler_passthrough_output_only)
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, nullptr, &output_params,
 target_rate, cb_passthrough_resampler_output, nullptr,
-CUBEB_RESAMPLER_QUALITY_VOIP);
+CUBEB_RESAMPLER_QUALITY_VOIP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 float output_buffer[output_channels * 256];
@@ -616,7 +619,8 @@ TEST(cubeb, resampler_passthrough_input_only)
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, &input_params, nullptr,
 target_rate, cb_passthrough_resampler_input, nullptr,
-CUBEB_RESAMPLER_QUALITY_VOIP);
+CUBEB_RESAMPLER_QUALITY_VOIP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 float input_buffer[input_channels * 256];
@@ -737,7 +741,8 @@ TEST(cubeb, resampler_passthrough_duplex_callback_reordering)
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, &input_params, &output_params,
 target_rate, cb_passthrough_resampler_duplex, &c,
-CUBEB_RESAMPLER_QUALITY_VOIP);
+CUBEB_RESAMPLER_QUALITY_VOIP,
+CUBEB_RESAMPLER_RECLOCK_NONE);
 const long BUF_BASE_SIZE = 256;
 float input_buffer_prebuffer[input_channels * BUF_BASE_SIZE * 2];
@@ -820,7 +825,7 @@ TEST(cubeb, resampler_drift_drop_data)
 cubeb_resampler * resampler =
 cubeb_resampler_create((cubeb_stream*)nullptr, &input_params, &output_params,
 target_rate, cb_passthrough_resampler_duplex, &c,
-CUBEB_RESAMPLER_QUALITY_VOIP);
+CUBEB_RESAMPLER_QUALITY_VOIP, CUBEB_RESAMPLER_RECLOCK_NONE);
 const long BUF_BASE_SIZE = 256;

View file

@@ -7,6 +7,7 @@
 #include <cstring>
 #include <inttypes.h>
 #include <iostream>
+#include <vector>
 #ifdef _WIN32
 #include <objbase.h> // Used by CoInitialize()
 #endif
@@ -35,6 +36,32 @@ static const char* state_to_string(cubeb_state state) {
 }
 }
+static const char* device_type_to_string(cubeb_device_type type) {
+switch (type) {
+case CUBEB_DEVICE_TYPE_INPUT:
+return "input";
+case CUBEB_DEVICE_TYPE_OUTPUT:
+return "output";
+case CUBEB_DEVICE_TYPE_UNKNOWN:
+return "unknown";
+default:
+assert(false);
+}
+}
+static const char* device_state_to_string(cubeb_device_state state) {
+switch (state) {
+case CUBEB_DEVICE_STATE_DISABLED:
+return "disabled";
+case CUBEB_DEVICE_STATE_UNPLUGGED:
+return "unplugged";
+case CUBEB_DEVICE_STATE_ENABLED:
+return "enabled";
+default:
+assert(false);
+}
+}
 void print_log(const char* msg, ...) {
 va_list args;
 va_start(args, msg);
@@ -48,6 +75,7 @@ public:
 ~cubeb_client() {}
 bool init(char const * backend_name = nullptr);
+cubeb_devid select_device(cubeb_device_type type) const;
 bool init_stream();
 bool start_stream();
 bool stop_stream();
@@ -70,7 +98,10 @@ public:
 bool unregister_device_collection_changed(cubeb_device_type devtype) const;
 cubeb_stream_params output_params = {};
+cubeb_devid output_device = nullptr;
 cubeb_stream_params input_params = {};
+cubeb_devid input_device = nullptr;
 void force_drain() { _force_drain = true; }
@@ -81,8 +112,6 @@ private:
 cubeb* context = nullptr;
 cubeb_stream* stream = nullptr;
-cubeb_devid output_device = nullptr;
-cubeb_devid input_device = nullptr;
 /* Accessed only from client and audio thread. */
 std::atomic<uint32_t> _rate = {0};
@@ -492,6 +521,56 @@ bool choose_action(cubeb_client& cl, operation_data * op, int c) {
 return true; // Loop up
 }
+cubeb_devid cubeb_client::select_device(cubeb_device_type type) const
+{
+assert(type == CUBEB_DEVICE_TYPE_INPUT || type == CUBEB_DEVICE_TYPE_OUTPUT);
+cubeb_device_collection collection;
+if (cubeb_enumerate_devices(context, type, &collection) ==
+CUBEB_ERROR_NOT_SUPPORTED) {
+fprintf(stderr,
+"Not support %s device selection. Force to use default device\n",
+device_type_to_string(type));
+return nullptr;
+}
+assert(collection.count);
+fprintf(stderr, "Found %zu %s devices. Choose one:\n", collection.count,
+device_type_to_string(type));
+std::vector<cubeb_devid> devices;
+devices.emplace_back(nullptr);
+fprintf(stderr, "# 0\n\tname: system default device\n");
+for (size_t i = 0; i < collection.count; i++) {
+assert(collection.device[i].type == type);
+fprintf(stderr,
+"# %zu %s\n"
+"\tname: %s\n"
+"\tdevice id: %s\n"
+"\tmax channels: %u\n"
+"\tstate: %s\n",
+devices.size(),
+collection.device[i].preferred ? " (PREFERRED)" : "",
+collection.device[i].friendly_name, collection.device[i].device_id,
+collection.device[i].max_channels,
+device_state_to_string(collection.device[i].state));
+devices.emplace_back(collection.device[i].devid);
+}
+cubeb_device_collection_destroy(context, &collection);
+size_t number;
+std::cout << "Enter device number: ";
+std::cin >> number;
+while (!std::cin || number >= devices.size()) {
+std::cin.clear();
+std::cin.ignore(100, '\n');
+std::cout << "Error: Please enter a valid numeric input. Enter again: ";
+std::cin >> number;
+}
+return devices[number];
+}
 int main(int argc, char* argv[]) {
 #ifdef _WIN32
 CoInitialize(nullptr);
@@ -523,7 +602,7 @@ int main(int argc, char* argv[]) {
 bool res = false;
 cubeb_client cl;
-cl.activate_log(CUBEB_LOG_DISABLED);
+cl.activate_log(CUBEB_LOG_NORMAL);
fprintf(stderr, "Log level is DISABLED\n"); fprintf(stderr, "Log level is DISABLED\n");
cl.init(/* default backend */); cl.init(/* default backend */);
@ -540,10 +619,12 @@ int main(int argc, char* argv[]) {
} }
} else { } else {
if (op.pm == PLAYBACK || op.pm == DUPLEX || op.pm == LATENCY_TESTING) { if (op.pm == PLAYBACK || op.pm == DUPLEX || op.pm == LATENCY_TESTING) {
cl.output_device = cl.select_device(CUBEB_DEVICE_TYPE_OUTPUT);
cl.output_params = {CUBEB_SAMPLE_FLOAT32NE, op.rate, DEFAULT_OUTPUT_CHANNELS, cl.output_params = {CUBEB_SAMPLE_FLOAT32NE, op.rate, DEFAULT_OUTPUT_CHANNELS,
CUBEB_LAYOUT_STEREO, CUBEB_STREAM_PREF_NONE}; CUBEB_LAYOUT_STEREO, CUBEB_STREAM_PREF_NONE};
} }
if (op.pm == RECORD || op.pm == DUPLEX || op.pm == LATENCY_TESTING) { if (op.pm == RECORD || op.pm == DUPLEX || op.pm == LATENCY_TESTING) {
cl.input_device = cl.select_device(CUBEB_DEVICE_TYPE_INPUT);
cl.input_params = {CUBEB_SAMPLE_FLOAT32NE, op.rate, DEFAULT_INPUT_CHANNELS, CUBEB_LAYOUT_UNDEFINED, CUBEB_STREAM_PREF_NONE}; cl.input_params = {CUBEB_SAMPLE_FLOAT32NE, op.rate, DEFAULT_INPUT_CHANNELS, CUBEB_LAYOUT_UNDEFINED, CUBEB_STREAM_PREF_NONE};
} }
if (op.pm == LATENCY_TESTING) { if (op.pm == LATENCY_TESTING) {

View file

@ -1,54 +1,214 @@
add_library(audio_core STATIC add_library(audio_core STATIC
algorithm/filter.cpp audio_core.cpp
algorithm/filter.h audio_core.h
algorithm/interpolate.cpp audio_event.h
algorithm/interpolate.h audio_event.cpp
audio_out.cpp audio_render_manager.cpp
audio_out.h audio_render_manager.h
audio_renderer.cpp audio_in_manager.cpp
audio_renderer.h audio_in_manager.h
behavior_info.cpp audio_out_manager.cpp
behavior_info.h audio_out_manager.h
buffer.h audio_manager.cpp
codec.cpp audio_manager.h
codec.h common/audio_renderer_parameter.h
command_generator.cpp common/common.h
command_generator.h common/feature_support.h
common.h common/wave_buffer.h
delay_line.cpp common/workbuffer_allocator.h
delay_line.h device/audio_buffer.h
effect_context.cpp device/audio_buffers.h
effect_context.h device/device_session.cpp
info_updater.cpp device/device_session.h
info_updater.h in/audio_in.cpp
memory_pool.cpp in/audio_in.h
memory_pool.h in/audio_in_system.cpp
mix_context.cpp in/audio_in_system.h
mix_context.h out/audio_out.cpp
null_sink.h out/audio_out.h
sink.h out/audio_out_system.cpp
sink_context.cpp out/audio_out_system.h
sink_context.h renderer/adsp/adsp.cpp
sink_details.cpp renderer/adsp/adsp.h
sink_details.h renderer/adsp/audio_renderer.cpp
sink_stream.h renderer/adsp/audio_renderer.h
splitter_context.cpp renderer/adsp/command_buffer.h
splitter_context.h renderer/adsp/command_list_processor.cpp
stream.cpp renderer/adsp/command_list_processor.h
stream.h renderer/audio_device.cpp
voice_context.cpp renderer/audio_device.h
voice_context.h renderer/audio_renderer.h
renderer/audio_renderer.cpp
$<$<BOOL:${ENABLE_CUBEB}>:cubeb_sink.cpp cubeb_sink.h> renderer/behavior/behavior_info.cpp
$<$<BOOL:${ENABLE_SDL2}>:sdl2_sink.cpp sdl2_sink.h> renderer/behavior/behavior_info.h
renderer/behavior/info_updater.cpp
renderer/behavior/info_updater.h
renderer/command/data_source/adpcm.cpp
renderer/command/data_source/adpcm.h
renderer/command/data_source/decode.cpp
renderer/command/data_source/decode.h
renderer/command/data_source/pcm_float.cpp
renderer/command/data_source/pcm_float.h
renderer/command/data_source/pcm_int16.cpp
renderer/command/data_source/pcm_int16.h
renderer/command/effect/aux_.cpp
renderer/command/effect/aux_.h
renderer/command/effect/biquad_filter.cpp
renderer/command/effect/biquad_filter.h
renderer/command/effect/capture.cpp
renderer/command/effect/capture.h
renderer/command/effect/delay.cpp
renderer/command/effect/delay.h
renderer/command/effect/i3dl2_reverb.cpp
renderer/command/effect/i3dl2_reverb.h
renderer/command/effect/light_limiter.cpp
renderer/command/effect/light_limiter.h
renderer/command/effect/multi_tap_biquad_filter.cpp
renderer/command/effect/multi_tap_biquad_filter.h
renderer/command/effect/reverb.cpp
renderer/command/effect/reverb.h
renderer/command/mix/clear_mix.cpp
renderer/command/mix/clear_mix.h
renderer/command/mix/copy_mix.cpp
renderer/command/mix/copy_mix.h
renderer/command/mix/depop_for_mix_buffers.cpp
renderer/command/mix/depop_for_mix_buffers.h
renderer/command/mix/depop_prepare.cpp
renderer/command/mix/depop_prepare.h
renderer/command/mix/mix.cpp
renderer/command/mix/mix.h
renderer/command/mix/mix_ramp.cpp
renderer/command/mix/mix_ramp.h
renderer/command/mix/mix_ramp_grouped.cpp
renderer/command/mix/mix_ramp_grouped.h
renderer/command/mix/volume.cpp
renderer/command/mix/volume.h
renderer/command/mix/volume_ramp.cpp
renderer/command/mix/volume_ramp.h
renderer/command/performance/performance.cpp
renderer/command/performance/performance.h
renderer/command/resample/downmix_6ch_to_2ch.cpp
renderer/command/resample/downmix_6ch_to_2ch.h
renderer/command/resample/resample.h
renderer/command/resample/resample.cpp
renderer/command/resample/upsample.cpp
renderer/command/resample/upsample.h
renderer/command/sink/device.cpp
renderer/command/sink/device.h
renderer/command/sink/circular_buffer.cpp
renderer/command/sink/circular_buffer.h
renderer/command/command_buffer.cpp
renderer/command/command_buffer.h
renderer/command/command_generator.cpp
renderer/command/command_generator.h
renderer/command/command_list_header.h
renderer/command/command_processing_time_estimator.cpp
renderer/command/command_processing_time_estimator.h
renderer/command/commands.h
renderer/command/icommand.h
renderer/effect/effect_aux_info.cpp
renderer/effect/effect_aux_info.h
renderer/effect/effect_biquad_filter_info.cpp
renderer/effect/effect_biquad_filter_info.h
renderer/effect/effect_buffer_mixer_info.cpp
renderer/effect/effect_buffer_mixer_info.h
renderer/effect/effect_capture_info.cpp
renderer/effect/effect_capture_info.h
renderer/effect/effect_context.cpp
renderer/effect/effect_context.h
renderer/effect/effect_delay_info.cpp
renderer/effect/effect_delay_info.h
renderer/effect/effect_i3dl2_info.cpp
renderer/effect/effect_i3dl2_info.h
renderer/effect/effect_reset.h
renderer/effect/effect_info_base.h
renderer/effect/effect_light_limiter_info.cpp
renderer/effect/effect_light_limiter_info.h
renderer/effect/effect_result_state.h
renderer/effect/effect_reverb_info.h
renderer/effect/effect_reverb_info.cpp
renderer/mix/mix_context.cpp
renderer/mix/mix_context.h
renderer/mix/mix_info.cpp
renderer/mix/mix_info.h
renderer/memory/address_info.h
renderer/memory/memory_pool_info.cpp
renderer/memory/memory_pool_info.h
renderer/memory/pool_mapper.cpp
renderer/memory/pool_mapper.h
renderer/nodes/bit_array.h
renderer/nodes/edge_matrix.cpp
renderer/nodes/edge_matrix.h
renderer/nodes/node_states.cpp
renderer/nodes/node_states.h
renderer/performance/detail_aspect.cpp
renderer/performance/detail_aspect.h
renderer/performance/entry_aspect.cpp
renderer/performance/entry_aspect.h
renderer/performance/performance_detail.h
renderer/performance/performance_entry.h
renderer/performance/performance_entry_addresses.h
renderer/performance/performance_frame_header.h
renderer/performance/performance_manager.cpp
renderer/performance/performance_manager.h
renderer/sink/circular_buffer_sink_info.cpp
renderer/sink/circular_buffer_sink_info.h
renderer/sink/device_sink_info.cpp
renderer/sink/device_sink_info.h
renderer/sink/sink_context.cpp
renderer/sink/sink_context.h
renderer/sink/sink_info_base.cpp
renderer/sink/sink_info_base.h
renderer/splitter/splitter_context.cpp
renderer/splitter/splitter_context.h
renderer/splitter/splitter_destinations_data.cpp
renderer/splitter/splitter_destinations_data.h
renderer/splitter/splitter_info.cpp
renderer/splitter/splitter_info.h
renderer/system.cpp
renderer/system.h
renderer/system_manager.cpp
renderer/system_manager.h
renderer/upsampler/upsampler_info.h
renderer/upsampler/upsampler_manager.cpp
renderer/upsampler/upsampler_manager.h
renderer/upsampler/upsampler_state.h
renderer/voice/voice_channel_resource.h
renderer/voice/voice_context.cpp
renderer/voice/voice_context.h
renderer/voice/voice_info.cpp
renderer/voice/voice_info.h
renderer/voice/voice_state.h
sink/cubeb_sink.cpp
sink/cubeb_sink.h
sink/null_sink.h
sink/sdl2_sink.cpp
sink/sdl2_sink.h
sink/sink.h
sink/sink_details.cpp
sink/sink_details.h
sink/sink_stream.h
) )
create_target_directory_groups(audio_core) create_target_directory_groups(audio_core)
if (NOT MSVC) if (MSVC)
target_compile_options(audio_core PRIVATE
/we4242 # 'identifier': conversion from 'type1' to 'type2', possible loss of data
/we4244 # 'conversion': conversion from 'type1' to 'type2', possible loss of data
/we4245 # 'conversion': conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4456 # Declaration of 'identifier' hides previous local declaration
/we4457 # Declaration of 'identifier' hides function parameter
/we4458 # Declaration of 'identifier' hides class member
/we4459 # Declaration of 'identifier' hides global declaration
)
else()
target_compile_options(audio_core PRIVATE target_compile_options(audio_core PRIVATE
-Werror=conversion -Werror=conversion
-Werror=ignored-qualifiers -Werror=ignored-qualifiers
-Werror=shadow
-Werror=unused-variable
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter> $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable> $<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
@ -58,6 +218,9 @@ if (NOT MSVC)
endif() endif()
target_link_libraries(audio_core PUBLIC common core) target_link_libraries(audio_core PUBLIC common core)
if (ARCHITECTURE_x86_64)
target_link_libraries(audio_core PRIVATE dynarmic)
endif()
if(ENABLE_CUBEB) if(ENABLE_CUBEB)
target_link_libraries(audio_core PRIVATE cubeb) target_link_libraries(audio_core PRIVATE cubeb)

60
src/audio_core/audio_core.cpp Executable file
View file

@ -0,0 +1,60 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_core.h"
#include "audio_core/sink/sink_details.h"
#include "common/settings.h"
#include "core/core.h"
namespace AudioCore {
AudioCore::AudioCore(Core::System& system) : audio_manager{std::make_unique<AudioManager>(system)} {
CreateSinks();
// Must be created after the sinks
adsp = std::make_unique<AudioRenderer::ADSP::ADSP>(system, *output_sink);
}
AudioCore ::~AudioCore() {
Shutdown();
}
void AudioCore::CreateSinks() {
const auto& sink_id{Settings::values.sink_id};
const auto& audio_output_device_id{Settings::values.audio_output_device_id};
const auto& audio_input_device_id{Settings::values.audio_input_device_id};
output_sink = Sink::CreateSinkFromID(sink_id.GetValue(), audio_output_device_id.GetValue());
input_sink = Sink::CreateSinkFromID(sink_id.GetValue(), audio_input_device_id.GetValue());
}
void AudioCore::Shutdown() {
audio_manager->Shutdown();
}
AudioManager& AudioCore::GetAudioManager() {
return *audio_manager;
}
Sink::Sink& AudioCore::GetOutputSink() {
return *output_sink;
}
Sink::Sink& AudioCore::GetInputSink() {
return *input_sink;
}
AudioRenderer::ADSP::ADSP& AudioCore::GetADSP() {
return *adsp;
}
void AudioCore::PauseSinks(const bool pausing) const {
if (pausing) {
output_sink->PauseStreams();
input_sink->PauseStreams();
} else {
output_sink->UnpauseStreams();
input_sink->UnpauseStreams();
}
}
} // namespace AudioCore

84
src/audio_core/audio_core.h Executable file
View file

@ -0,0 +1,84 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include "audio_core/audio_manager.h"
#include "audio_core/renderer/adsp/adsp.h"
#include "audio_core/sink/sink.h"
namespace Core {
class System;
}
namespace AudioCore {
class AudioManager;
/**
* Main audio class, stored inside the core, holding the audio manager, all sinks, and the ADSP.
*/
class AudioCore {
public:
explicit AudioCore(Core::System& system);
~AudioCore();
/**
* Shutdown the audio core.
*/
void Shutdown();
/**
* Get a reference to the audio manager.
*
* @return Ref to the audio manager.
*/
AudioManager& GetAudioManager();
/**
* Get the audio output sink currently in use.
*
* @return Ref to the sink.
*/
Sink::Sink& GetOutputSink();
/**
* Get the audio input sink currently in use.
*
* @return Ref to the sink.
*/
Sink::Sink& GetInputSink();
/**
* Get the ADSP.
*
* @return Ref to the ADSP.
*/
AudioRenderer::ADSP::ADSP& GetADSP();
/**
* Pause the sink. Called from the core.
*
* @param pausing - Is this pause due to an actual pause, or shutdown?
* Unfortunately, shutdown also pauses streams, which can cause issues.
*/
void PauseSinks(bool pausing) const;
private:
/**
* Create the sinks on startup.
*/
void CreateSinks();
/// Main audio manager for audio in/out
std::unique_ptr<AudioManager> audio_manager;
/// Sink used for audio renderer and audio out
std::unique_ptr<Sink::Sink> output_sink;
/// Sink used for audio input
std::unique_ptr<Sink::Sink> input_sink;
/// The ADSP in the sysmodule
std::unique_ptr<AudioRenderer::ADSP::ADSP> adsp;
};
} // namespace AudioCore
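// Illustrative usage sketch (hypothetical addition, not part of the original file): minimal use
// of the interface above from a host front end. `audio` is assumed to be the instance owned by
// Core::System; only declarations from this header are used.
inline void ExampleAudioCoreUsage(AudioCore::AudioCore& audio) {
    auto& output = audio.GetOutputSink(); // sink shared by the audio renderer and audio out
    (void)output;
    audio.PauseSinks(true);  // pause backend streams, e.g. while emulation is paused
    audio.PauseSinks(false); // resume them; Shutdown() is handled by ~AudioCore()
}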

61
src/audio_core/audio_event.cpp Executable file
View file

@ -0,0 +1,61 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_event.h"
#include "common/assert.h"
namespace AudioCore {
size_t Event::GetManagerIndex(const Type type) const {
switch (type) {
case Type::AudioInManager:
return 0;
case Type::AudioOutManager:
return 1;
case Type::FinalOutputRecorderManager:
return 2;
case Type::Max:
return 3;
default:
UNREACHABLE();
}
return 3;
}
void Event::SetAudioEvent(const Type type, const bool signalled) {
events_signalled[GetManagerIndex(type)] = signalled;
if (signalled) {
manager_event.notify_one();
}
}
bool Event::CheckAudioEventSet(const Type type) const {
return events_signalled[GetManagerIndex(type)];
}
std::mutex& Event::GetAudioEventLock() {
return event_lock;
}
std::condition_variable_any& Event::GetAudioEvent() {
return manager_event;
}
bool Event::Wait(std::unique_lock<std::mutex>& l, const std::chrono::seconds timeout) {
bool timed_out{false};
if (!manager_event.wait_for(l, timeout, [&]() {
return std::ranges::any_of(events_signalled, [](bool x) { return x; });
})) {
timed_out = true;
}
return timed_out;
}
void Event::ClearEvents() {
events_signalled[0] = false;
events_signalled[1] = false;
events_signalled[2] = false;
events_signalled[3] = false;
}
} // namespace AudioCore

92
src/audio_core/audio_event.h Executable file
View file

@ -0,0 +1,92 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
namespace AudioCore {
/**
* Responsible for the input/output events, set by the stream backend when buffers are consumed, and
* waited on by the audio manager. These callbacks signal the game's events to keep the audio buffer
* recycling going.
* In a real Switch this is not a separate class, and exists entirely within the audio manager.
* On the Switch it's implemented more simply through a MultiWaitEventHolder, where it can
* wait on multiple events at once, and the events are not needed by the backend.
*/
class Event {
public:
enum class Type {
AudioInManager,
AudioOutManager,
FinalOutputRecorderManager,
Max,
};
/**
* Convert a manager type to an index.
*
* @param type - The manager type to convert
* @return The index of the type.
*/
size_t GetManagerIndex(Type type) const;
/**
* Set an audio event to true or false.
*
* @param type - The manager type to signal.
* @param signalled - Its signal state.
*/
void SetAudioEvent(Type type, bool signalled);
/**
* Check if the given manager type is signalled.
*
* @param type - The manager type to check.
* @return True if the event is signalled, otherwise false.
*/
bool CheckAudioEventSet(Type type) const;
/**
* Get the lock for audio events.
*
* @return Reference to the lock.
*/
std::mutex& GetAudioEventLock();
/**
* Get the manager event, this signals the audio manager to release buffers and signal the game
* for more.
*
* @return Reference to the condition variable.
*/
std::condition_variable_any& GetAudioEvent();
/**
* Wait on the manager_event.
*
* @param l - Lock held by the wait.
* @param timeout - Timeout for the wait; callers typically pass 2 seconds.
* @return True if the wait timed out, otherwise false if signalled.
*/
bool Wait(std::unique_lock<std::mutex>& l, std::chrono::seconds timeout);
/**
* Reset all manager events.
*/
void ClearEvents();
private:
/// Lock, used by the audio manager
std::mutex event_lock;
/// Array of events, one per system type (see Type), last event is used to terminate
std::array<std::atomic<bool>, 4> events_signalled;
/// Event to signal the audio manager
std::condition_variable_any manager_event;
};
} // namespace AudioCore
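// Illustrative usage sketch (hypothetical addition, not part of the original file): the
// signal/wait pattern described above, split into the two sides that use it. Function and
// namespace names here are invented for the example.
namespace AudioCore::ExampleEventUsage {
inline void BackendSignalsOutBuffers(Event& events) {
    // Producer side: a sink stream reports that output buffers were consumed.
    events.SetAudioEvent(Event::Type::AudioOutManager, true);
}
inline bool ManagerWaitsForWork(Event& events) {
    // Consumer side: the audio manager thread sleeps until a signal or a 2 second timeout.
    std::unique_lock l{events.GetAudioEventLock()};
    const bool timed_out = events.Wait(l, std::chrono::seconds(2));
    const bool out_ready = events.CheckAudioEventSet(Event::Type::AudioOutManager);
    events.SetAudioEvent(Event::Type::AudioOutManager, false); // consume the signal
    return !timed_out && out_ready;
}
} // namespace AudioCore::ExampleEventUsage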

View file

@ -0,0 +1,86 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_core.h"
#include "audio_core/audio_in_manager.h"
#include "audio_core/audio_manager.h"
#include "audio_core/in/audio_in.h"
#include "core/core.h"
#include "core/hle/service/audio/errors.h"
namespace AudioCore::AudioIn {
Manager::Manager(Core::System& system_) : system{system_} {
std::iota(session_ids.begin(), session_ids.end(), 0);
num_free_sessions = MaxInSessions;
}
Result Manager::AcquireSessionId(size_t& session_id) {
if (num_free_sessions == 0) {
LOG_ERROR(Service_Audio, "All 4 AudioIn sessions are in use, cannot create any more");
return Service::Audio::ERR_MAXIMUM_SESSIONS_REACHED;
}
session_id = session_ids[next_session_id];
next_session_id = (next_session_id + 1) % MaxInSessions;
num_free_sessions--;
return ResultSuccess;
}
void Manager::ReleaseSessionId(const size_t session_id) {
std::scoped_lock l{mutex};
LOG_DEBUG(Service_Audio, "Freeing AudioIn session {}", session_id);
session_ids[free_session_id] = session_id;
num_free_sessions++;
free_session_id = (free_session_id + 1) % MaxInSessions;
sessions[session_id].reset();
applet_resource_user_ids[session_id] = 0;
}
Result Manager::LinkToManager() {
std::scoped_lock l{mutex};
if (!linked_to_manager) {
AudioManager& manager{system.AudioCore().GetAudioManager()};
manager.SetInManager(std::bind(&Manager::BufferReleaseAndRegister, this));
linked_to_manager = true;
}
return ResultSuccess;
}
void Manager::Start() {
if (sessions_started) {
return;
}
std::scoped_lock l{mutex};
for (auto& session : sessions) {
if (session) {
session->StartSession();
}
}
sessions_started = true;
}
void Manager::BufferReleaseAndRegister() {
std::scoped_lock l{mutex};
for (auto& session : sessions) {
if (session != nullptr) {
session->ReleaseAndRegisterBuffers();
}
}
}
u32 Manager::GetDeviceNames(std::vector<AudioRenderer::AudioDevice::AudioDeviceName>& names,
[[maybe_unused]] const u32 max_count,
[[maybe_unused]] const bool filter) {
std::scoped_lock l{mutex};
LinkToManager();
names.push_back(AudioRenderer::AudioDevice::AudioDeviceName("Uac"));
return 1;
}
} // namespace AudioCore::AudioIn

View file

@ -0,0 +1,92 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <mutex>
#include <vector>
#include "audio_core/renderer/audio_device.h"
namespace Core {
class System;
}
namespace AudioCore::AudioIn {
class In;
constexpr size_t MaxInSessions = 4;
/**
* Manages all audio in sessions.
*/
class Manager {
public:
explicit Manager(Core::System& system);
/**
* Acquire a free session id for opening a new audio in.
*
* @param session_id - Output session_id.
* @return Result code.
*/
Result AcquireSessionId(size_t& session_id);
/**
* Release a session id on close.
*
* @param session_id - Session id to free.
*/
void ReleaseSessionId(size_t session_id);
/**
* Link the audio in manager to the main audio manager.
*
* @return Result code.
*/
Result LinkToManager();
/**
* Start the audio in manager.
*/
void Start();
/**
* Callback function, called by the audio manager when the audio in event is signalled.
*/
void BufferReleaseAndRegister();
/**
* Get a list of audio in device names.
*
* @param names - Output container to write names to.
* @param max_count - Maximum number of device names to write. Unused.
* @param filter - Should the list be filtered? Unused.
* @return Number of names written.
*/
u32 GetDeviceNames(std::vector<AudioRenderer::AudioDevice::AudioDeviceName>& names,
u32 max_count, bool filter);
/// Core system
Core::System& system;
/// Array of session ids
std::array<size_t, MaxInSessions> session_ids{};
/// Array of resource user ids
std::array<size_t, MaxInSessions> applet_resource_user_ids{};
/// Pointer to each open session
std::array<std::shared_ptr<In>, MaxInSessions> sessions{};
/// The number of free sessions
size_t num_free_sessions{};
/// The next session id to be taken
size_t next_session_id{};
/// The next session id to be freed
size_t free_session_id{};
/// Whether this is linked to the audio manager
bool linked_to_manager{};
/// Whether the sessions have been started
bool sessions_started{};
/// Protect state due to audio manager callback
std::recursive_mutex mutex{};
};
} // namespace AudioCore::AudioIn
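// Illustrative usage sketch (hypothetical addition, not part of the original file): the expected
// open/close flow for a single AudioIn session. `manager` is assumed to be the service-owned
// instance.
inline bool ExampleOpenCloseAudioInSession(AudioCore::AudioIn::Manager& manager) {
    size_t session_id{};
    if (manager.AcquireSessionId(session_id) != ResultSuccess) {
        return false; // all MaxInSessions (4) ids are currently taken
    }
    manager.LinkToManager(); // registers BufferReleaseAndRegister with the audio manager
    manager.Start();         // starts any sessions that have already been opened
    // ... open an In session under session_id, append buffers, play ...
    manager.ReleaseSessionId(session_id); // return the id to the pool on close
    return true;
}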

View file

@ -0,0 +1,80 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_in_manager.h"
#include "audio_core/audio_manager.h"
#include "audio_core/audio_out_manager.h"
#include "core/core.h"
namespace AudioCore {
AudioManager::AudioManager(Core::System& system_) : system{system_} {
thread = std::jthread([this]() { ThreadFunc(); });
}
void AudioManager::Shutdown() {
running = false;
events.SetAudioEvent(Event::Type::Max, true);
thread.join();
}
Result AudioManager::SetOutManager(BufferEventFunc buffer_func) {
if (!running) {
return Service::Audio::ERR_OPERATION_FAILED;
}
std::scoped_lock l{lock};
const auto index{events.GetManagerIndex(Event::Type::AudioOutManager)};
if (buffer_events[index] == nullptr) {
buffer_events[index] = buffer_func;
needs_update = true;
events.SetAudioEvent(Event::Type::AudioOutManager, true);
}
return ResultSuccess;
}
Result AudioManager::SetInManager(BufferEventFunc buffer_func) {
if (!running) {
return Service::Audio::ERR_OPERATION_FAILED;
}
std::scoped_lock l{lock};
const auto index{events.GetManagerIndex(Event::Type::AudioInManager)};
if (buffer_events[index] == nullptr) {
buffer_events[index] = buffer_func;
needs_update = true;
events.SetAudioEvent(Event::Type::AudioInManager, true);
}
return ResultSuccess;
}
void AudioManager::SetEvent(const Event::Type type, const bool signalled) {
events.SetAudioEvent(type, signalled);
}
void AudioManager::ThreadFunc() {
std::unique_lock l{events.GetAudioEventLock()};
events.ClearEvents();
running = true;
while (running) {
auto timed_out{events.Wait(l, std::chrono::seconds(2))};
if (events.CheckAudioEventSet(Event::Type::Max)) {
break;
}
for (size_t i = 0; i < buffer_events.size(); i++) {
if (events.CheckAudioEventSet(Event::Type(i)) || timed_out) {
if (buffer_events[i]) {
buffer_events[i]();
}
}
events.SetAudioEvent(Event::Type(i), false);
}
}
}
} // namespace AudioCore

101
src/audio_core/audio_manager.h Executable file
View file

@ -0,0 +1,101 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <atomic>
#include <functional>
#include <mutex>
#include <thread>
#include "audio_core/audio_event.h"
#include "core/hle/service/audio/errors.h"
namespace Core {
class System;
}
namespace AudioCore {
namespace AudioOut {
class Manager;
}
namespace AudioIn {
class Manager;
}
/**
* The AudioManager's main purpose is to wait for buffer events for the audio in and out managers,
* and call an associated callback to release buffers.
*
* Execution pattern is:
* Buffers appended ->
* Buffers queued and played by the backend stream ->
* When consumed, set the corresponding manager event and signal the audio manager ->
* Consumed buffers are released, game is signalled ->
* Game appends more buffers.
*
* This is only used by audio in and audio out.
*/
class AudioManager {
using BufferEventFunc = std::function<void()>;
public:
explicit AudioManager(Core::System& system);
/**
* Shutdown the audio manager.
*/
void Shutdown();
/**
* Register the out manager, keeping a function to be called when the out event is signalled.
*
* @param buffer_func - Function to be called on signal.
* @return Result code.
*/
Result SetOutManager(BufferEventFunc buffer_func);
/**
* Register the in manager, keeping a function to be called when the in event is signalled.
*
* @param buffer_func - Function to be called on signal.
* @return Result code.
*/
Result SetInManager(BufferEventFunc buffer_func);
/**
* Set an event to signalled, and signal the thread.
*
* @param type - Manager type to set.
* @param signalled - Set the event to true or false?
*/
void SetEvent(Event::Type type, bool signalled);
private:
/**
* Main thread, waiting on a manager signal and calling the registered function.
*/
void ThreadFunc();
/// Core system
Core::System& system;
/// Have sessions started playing?
bool sessions_started{};
/// Is the main thread running?
std::atomic<bool> running{};
/// Unused
bool needs_update{};
/// Events to be set and signalled
Event events{};
/// Callbacks for each manager
std::array<BufferEventFunc, 3> buffer_events{};
/// General lock
std::mutex lock{};
/// Main thread for waiting and callbacks
std::jthread thread;
};
} // namespace AudioCore
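// Illustrative usage sketch (hypothetical addition, not part of the original file): the execution
// pattern described above, reduced to its two ends. `manager` is assumed to be the instance owned
// by AudioCore, and the lambda stands in for AudioOut::Manager's BufferReleaseAndRegister.
inline void ExampleAudioManagerFlow(AudioCore::AudioManager& manager) {
    // The out manager registers the function to run whenever its event is signalled.
    manager.SetOutManager([]() { /* release consumed buffers and signal the game */ });
    // The backend reports consumed buffers by signalling the out event; the manager thread
    // wakes up and invokes the callback registered above.
    manager.SetEvent(AudioCore::Event::Type::AudioOutManager, true);
}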

View file

@ -0,0 +1,81 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_core.h"
#include "audio_core/audio_manager.h"
#include "audio_core/audio_out_manager.h"
#include "audio_core/out/audio_out.h"
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/audio/errors.h"
namespace AudioCore::AudioOut {
Manager::Manager(Core::System& system_) : system{system_} {
std::iota(session_ids.begin(), session_ids.end(), 0);
num_free_sessions = MaxOutSessions;
}
Result Manager::AcquireSessionId(size_t& session_id) {
if (num_free_sessions == 0) {
LOG_ERROR(Service_Audio, "All 12 Audio Out sessions are in use, cannot create any more");
return Service::Audio::ERR_MAXIMUM_SESSIONS_REACHED;
}
session_id = session_ids[next_session_id];
next_session_id = (next_session_id + 1) % MaxOutSessions;
num_free_sessions--;
return ResultSuccess;
}
void Manager::ReleaseSessionId(const size_t session_id) {
std::scoped_lock l{mutex};
LOG_DEBUG(Service_Audio, "Freeing AudioOut session {}", session_id);
session_ids[free_session_id] = session_id;
num_free_sessions++;
free_session_id = (free_session_id + 1) % MaxOutSessions;
sessions[session_id].reset();
applet_resource_user_ids[session_id] = 0;
}
Result Manager::LinkToManager() {
std::scoped_lock l{mutex};
if (!linked_to_manager) {
AudioManager& manager{system.AudioCore().GetAudioManager()};
manager.SetOutManager(std::bind(&Manager::BufferReleaseAndRegister, this));
linked_to_manager = true;
}
return ResultSuccess;
}
void Manager::Start() {
if (sessions_started) {
return;
}
std::scoped_lock l{mutex};
for (auto& session : sessions) {
if (session) {
session->StartSession();
}
}
sessions_started = true;
}
void Manager::BufferReleaseAndRegister() {
std::scoped_lock l{mutex};
for (auto& session : sessions) {
if (session != nullptr) {
session->ReleaseAndRegisterBuffers();
}
}
}
u32 Manager::GetAudioOutDeviceNames(
std::vector<AudioRenderer::AudioDevice::AudioDeviceName>& names) const {
names.push_back({"DeviceOut"});
return 1;
}
} // namespace AudioCore::AudioOut

View file

@ -0,0 +1,89 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <mutex>
#include "audio_core/renderer/audio_device.h"
namespace Core {
class System;
}
namespace AudioCore::AudioOut {
class Out;
constexpr size_t MaxOutSessions = 12;
/**
* Manages all audio out sessions.
*/
class Manager {
public:
explicit Manager(Core::System& system);
/**
* Acquire a free session id for opening a new audio out.
*
* @param session_id - Output session_id.
* @return Result code.
*/
Result AcquireSessionId(size_t& session_id);
/**
* Release a session id on close.
*
* @param session_id - Session id to free.
*/
void ReleaseSessionId(size_t session_id);
/**
* Link this manager to the main audio manager.
*
* @return Result code.
*/
Result LinkToManager();
/**
* Start the audio out manager.
*/
void Start();
/**
* Callback function, called by the audio manager when the audio out event is signalled.
*/
void BufferReleaseAndRegister();
/**
* Get a list of audio out device names.
*
* @param names - Output container to write names to.
* @return Number of names written.
*/
u32 GetAudioOutDeviceNames(
std::vector<AudioRenderer::AudioDevice::AudioDeviceName>& names) const;
/// Core system
Core::System& system;
/// Array of session ids
std::array<size_t, MaxOutSessions> session_ids{};
/// Array of resource user ids
std::array<size_t, MaxOutSessions> applet_resource_user_ids{};
/// Pointer to each open session
std::array<std::shared_ptr<Out>, MaxOutSessions> sessions{};
/// The number of free sessions
size_t num_free_sessions{};
/// The next session id to be taken
size_t next_session_id{};
/// The next session id to be freed
size_t free_session_id{};
/// Whether this is linked to the audio manager
bool linked_to_manager{};
/// Whether the sessions have been started
bool sessions_started{};
/// Protect state due to audio manager callback
std::recursive_mutex mutex{};
};
} // namespace AudioCore::AudioOut

View file

@ -0,0 +1,70 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_render_manager.h"
#include "audio_core/common/audio_renderer_parameter.h"
#include "audio_core/common/feature_support.h"
#include "core/core.h"
namespace AudioCore::AudioRenderer {
Manager::Manager(Core::System& system_)
: system{system_}, system_manager{std::make_unique<SystemManager>(system)} {
std::iota(session_ids.begin(), session_ids.end(), 0);
}
Manager::~Manager() {
Stop();
}
void Manager::Stop() {
system_manager->Stop();
}
SystemManager& Manager::GetSystemManager() {
return *system_manager;
}
auto Manager::GetWorkBufferSize(const AudioRendererParameterInternal& params, u64& out_count)
-> Result {
if (!CheckValidRevision(params.revision)) {
return Service::Audio::ERR_INVALID_REVISION;
}
out_count = System::GetWorkBufferSize(params);
return ResultSuccess;
}
s32 Manager::GetSessionId() {
std::scoped_lock l{session_lock};
auto session_id{session_ids[session_count]};
if (session_id == -1) {
return -1;
}
session_ids[session_count] = -1;
session_count++;
return session_id;
}
void Manager::ReleaseSessionId(const s32 session_id) {
std::scoped_lock l{session_lock};
session_ids[--session_count] = session_id;
}
u32 Manager::GetSessionCount() {
std::scoped_lock l{session_lock};
return session_count;
}
bool Manager::AddSystem(System& system_) {
return system_manager->Add(system_);
}
bool Manager::RemoveSystem(System& system_) {
return system_manager->Remove(system_);
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,103 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <memory>
#include <mutex>
#include "audio_core/common/common.h"
#include "audio_core/renderer/system_manager.h"
#include "core/hle/service/audio/errors.h"
namespace Core {
class System;
}
namespace AudioCore {
struct AudioRendererParameterInternal;
namespace AudioRenderer {
/**
* Wrapper for the audio system manager, handles service calls.
*/
class Manager {
public:
explicit Manager(Core::System& system);
~Manager();
/**
* Stop the manager.
*/
void Stop();
/**
* Get the system manager.
*
* @return The system manager.
*/
SystemManager& GetSystemManager();
/**
* Get required size for the audio renderer workbuffer.
*
* @param params - Input parameters with the numbers of voices/mixes/sinks etc.
* @param out_count - Output size of the required workbuffer.
* @return Result code.
*/
Result GetWorkBufferSize(const AudioRendererParameterInternal& params, u64& out_count);
/**
* Get a new session id.
*
* @return The new session id: -1 if all sessions are in use, otherwise in the range [0, MaxRendererSessions).
*/
s32 GetSessionId();
/**
* Get the number of currently active sessions.
*
* @return The number of active sessions.
*/
u32 GetSessionCount();
/**
* Add a renderer system to the manager.
* The system will be regularly called to generate commands for the AudioRenderer.
*
* @param system - The system to add.
* @return True if the system was successfully added, otherwise false.
*/
bool AddSystem(System& system);
/**
* Remove a renderer system from the manager.
*
* @param system - The system to remove.
* @return True if the system was successfully removed, otherwise false.
*/
bool RemoveSystem(System& system);
/**
* Free a session id when the system wants to shut down.
*
* @param session_id - The session id to free.
*/
void ReleaseSessionId(s32 session_id);
private:
/// Core system
Core::System& system;
/// Session ids, -1 when in use
std::array<s32, MaxRendererSessions> session_ids{};
/// Number of active renderers
u32 session_count{};
/// Lock for interacting with the sessions
std::mutex session_lock{};
/// Regularly generates commands from the registered systems for the AudioRenderer
std::unique_ptr<SystemManager> system_manager{};
};
} // namespace AudioRenderer
} // namespace AudioCore
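// Illustrative usage sketch (hypothetical addition, not part of the original file): taking a
// renderer session id from the pool and returning it. `manager` is assumed to be the
// service-owned instance.
inline void ExampleRendererSessionIds(AudioCore::AudioRenderer::Manager& manager) {
    const s32 session_id = manager.GetSessionId(); // -1 once both MaxRendererSessions are taken
    if (session_id >= 0) {
        // ... construct a renderer System bound to session_id and AddSystem() it ...
        manager.ReleaseSessionId(session_id); // hand the id back when the renderer shuts down
    }
}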

View file

@ -0,0 +1,60 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/renderer/behavior/behavior_info.h"
#include "audio_core/renderer/memory/memory_pool_info.h"
#include "audio_core/renderer/upsampler/upsampler_manager.h"
#include "common/common_types.h"
namespace AudioCore {
/**
* Execution mode of the audio renderer.
* Only Auto is currently supported.
*/
enum class ExecutionMode : u8 {
Auto,
Manual,
};
/**
* Parameters from the game, passed to the audio renderer for initialisation.
*/
struct AudioRendererParameterInternal {
/* 0x00 */ u32 sample_rate;
/* 0x04 */ u32 sample_count;
/* 0x08 */ u32 mixes;
/* 0x0C */ u32 sub_mixes;
/* 0x10 */ u32 voices;
/* 0x14 */ u32 sinks;
/* 0x18 */ u32 effects;
/* 0x1C */ u32 perf_frames;
/* 0x20 */ u16 voice_drop_enabled;
/* 0x22 */ u8 rendering_device;
/* 0x23 */ ExecutionMode execution_mode;
/* 0x24 */ u32 splitter_infos;
/* 0x28 */ s32 splitter_destinations;
/* 0x2C */ u32 external_context_size;
/* 0x30 */ u32 revision;
/* 0x34 */ char unk34[0x4];
};
static_assert(sizeof(AudioRendererParameterInternal) == 0x38,
"AudioRendererParameterInternal has the wrong size!");
/**
* Context for rendering, contains a bunch of useful fields for the command generator.
*/
struct AudioRendererSystemContext {
s32 session_id;
s8 channels;
s16 mix_buffer_count;
AudioRenderer::BehaviorInfo* behavior;
std::span<s32> depop_buffer;
AudioRenderer::UpsamplerManager* upsampler_manager;
AudioRenderer::MemoryPoolInfo* memory_pool_info;
};
} // namespace AudioCore
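// Illustrative consistency checks (hypothetical addition, not part of the original file): the
// per-field offset comments above can be verified with offsetof, in the same spirit as the
// existing size static_assert.
#include <cstddef> // offsetof
static_assert(offsetof(AudioCore::AudioRendererParameterInternal, sample_rate) == 0x00);
static_assert(offsetof(AudioCore::AudioRendererParameterInternal, execution_mode) == 0x23);
static_assert(offsetof(AudioCore::AudioRendererParameterInternal, revision) == 0x30);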

97
src/audio_core/common/common.h Executable file
View file

@ -0,0 +1,97 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <numeric>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace AudioCore {
using CpuAddr = std::uintptr_t;
enum class PlayState : u8 {
Started,
Stopped,
Paused,
};
enum class SrcQuality : u8 {
Medium,
High,
Low,
};
enum class SampleFormat : u8 {
Invalid,
PcmInt8,
PcmInt16,
PcmInt24,
PcmInt32,
PcmFloat,
Adpcm,
};
enum class SessionTypes {
AudioIn,
AudioOut,
FinalOutputRecorder,
};
constexpr u32 BufferCount = 32;
constexpr u32 MaxRendererSessions = 2;
constexpr u32 TargetSampleCount = 240;
constexpr u32 TargetSampleRate = 48'000;
constexpr u32 MaxChannels = 6;
constexpr u32 MaxMixBuffers = 24;
constexpr u32 MaxWaveBuffers = 4;
constexpr s32 LowestVoicePriority = 0xFF;
constexpr s32 HighestVoicePriority = 0;
constexpr u32 BufferAlignment = 0x40;
constexpr u32 WorkbufferAlignment = 0x1000;
constexpr s32 FinalMixId = 0;
constexpr s32 InvalidDistanceFromFinalMix = std::numeric_limits<s32>::min();
constexpr s32 UnusedSplitterId = -1;
constexpr s32 UnusedMixId = std::numeric_limits<s32>::max();
constexpr u32 InvalidNodeId = 0xF0000000;
constexpr s32 InvalidProcessOrder = -1;
constexpr u32 MaxBiquadFilters = 2;
constexpr u32 MaxEffects = 256;
constexpr bool IsChannelCountValid(u16 channel_count) {
return channel_count <= 6 &&
(channel_count == 1 || channel_count == 2 || channel_count == 4 || channel_count == 6);
}
constexpr u32 GetSplitterInParamHeaderMagic() {
return Common::MakeMagic('S', 'N', 'D', 'H');
}
constexpr u32 GetSplitterInfoMagic() {
return Common::MakeMagic('S', 'N', 'D', 'I');
}
constexpr u32 GetSplitterSendDataMagic() {
return Common::MakeMagic('S', 'N', 'D', 'D');
}
constexpr size_t GetSampleFormatByteSize(SampleFormat format) {
switch (format) {
case SampleFormat::PcmInt8:
return 1;
case SampleFormat::PcmInt16:
return 2;
case SampleFormat::PcmInt24:
return 3;
case SampleFormat::PcmInt32:
case SampleFormat::PcmFloat:
return 4;
default:
return 2;
}
}
} // namespace AudioCore
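// Illustrative sketch (hypothetical addition, not part of the original file): how the constants
// above combine into per-frame buffer sizes. One render frame is TargetSampleCount samples per
// channel at TargetSampleRate, i.e. 240 / 48'000 = 5 ms of audio.
constexpr u64 ExampleFrameBytes(AudioCore::SampleFormat format, u32 channel_count) {
    // e.g. PcmInt16 stereo: 240 samples * 2 channels * 2 bytes = 960 bytes per 5 ms frame.
    return u64{AudioCore::TargetSampleCount} * channel_count *
           AudioCore::GetSampleFormatByteSize(format);
}
static_assert(ExampleFrameBytes(AudioCore::SampleFormat::PcmInt16, 2) == 960);
static_assert(AudioCore::IsChannelCountValid(6) && !AudioCore::IsChannelCountValid(3));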

View file

@ -0,0 +1,99 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <map>
#include <ranges>
#include <tuple>
#include "common/assert.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
namespace AudioCore {
constexpr u32 CurrentRevision = 10;
enum class SupportTags {
CommandProcessingTimeEstimatorVersion4,
CommandProcessingTimeEstimatorVersion3,
CommandProcessingTimeEstimatorVersion2,
MultiTapBiquadFilterProcessing,
EffectInfoVer2,
WaveBufferVer2,
BiquadFilterFloatProcessing,
VolumeMixParameterPrecisionQ23,
MixInParameterDirtyOnlyUpdate,
BiquadFilterEffectStateClearBugFix,
VoicePlayedSampleCountResetAtLoopPoint,
VoicePitchAndSrcSkipped,
SplitterBugFix,
FlushVoiceWaveBuffers,
ElapsedFrameCount,
AudioRendererVariadicCommandBufferSize,
PerformanceMetricsDataFormatVersion2,
AudioRendererProcessingTimeLimit80Percent,
AudioRendererProcessingTimeLimit75Percent,
AudioRendererProcessingTimeLimit70Percent,
AdpcmLoopContextBugFix,
Splitter,
LongSizePreDelay,
AudioUsbDeviceOutput,
DeviceApiVersion2,
// Not a real tag, just here to get the count.
Size
};
constexpr u32 GetRevisionNum(u32 user_revision) {
if (user_revision >= 0x100) {
user_revision -= Common::MakeMagic('R', 'E', 'V', '0');
user_revision >>= 24;
}
return user_revision;
};
constexpr bool CheckFeatureSupported(SupportTags tag, u32 user_revision) {
constexpr std::array<std::pair<SupportTags, u32>, static_cast<u32>(SupportTags::Size)> features{
{
{SupportTags::AudioRendererProcessingTimeLimit70Percent, 1},
{SupportTags::Splitter, 2},
{SupportTags::AdpcmLoopContextBugFix, 2},
{SupportTags::LongSizePreDelay, 3},
{SupportTags::AudioUsbDeviceOutput, 4},
{SupportTags::AudioRendererProcessingTimeLimit75Percent, 4},
{SupportTags::VoicePlayedSampleCountResetAtLoopPoint, 5},
{SupportTags::VoicePitchAndSrcSkipped, 5},
{SupportTags::SplitterBugFix, 5},
{SupportTags::FlushVoiceWaveBuffers, 5},
{SupportTags::ElapsedFrameCount, 5},
{SupportTags::AudioRendererProcessingTimeLimit80Percent, 5},
{SupportTags::AudioRendererVariadicCommandBufferSize, 5},
{SupportTags::PerformanceMetricsDataFormatVersion2, 5},
{SupportTags::CommandProcessingTimeEstimatorVersion2, 5},
{SupportTags::BiquadFilterEffectStateClearBugFix, 6},
{SupportTags::BiquadFilterFloatProcessing, 7},
{SupportTags::VolumeMixParameterPrecisionQ23, 7},
{SupportTags::MixInParameterDirtyOnlyUpdate, 7},
{SupportTags::WaveBufferVer2, 8},
{SupportTags::CommandProcessingTimeEstimatorVersion3, 8},
{SupportTags::EffectInfoVer2, 9},
{SupportTags::CommandProcessingTimeEstimatorVersion4, 10},
{SupportTags::MultiTapBiquadFilterProcessing, 10},
}};
const auto& feature =
std::ranges::find_if(features, [tag](const auto& entry) { return entry.first == tag; });
if (feature == features.cend()) {
LOG_ERROR(Service_Audio, "Invalid SupportTag {}!", static_cast<u32>(tag));
return false;
}
user_revision = GetRevisionNum(user_revision);
return (*feature).second <= user_revision;
}
constexpr bool CheckValidRevision(u32 user_revision) {
return GetRevisionNum(user_revision) <= CurrentRevision;
};
} // namespace AudioCore
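// Illustrative checks (hypothetical addition, not part of the original file): how user revisions
// are decoded and matched against feature tags. A game reporting revision 'REV8' decodes to 8,
// which is new enough for WaveBufferVer2 (added in 8) but not for MultiTapBiquadFilterProcessing
// (added in 10).
static_assert(AudioCore::GetRevisionNum(Common::MakeMagic('R', 'E', 'V', '8')) == 8);
static_assert(AudioCore::CheckValidRevision(Common::MakeMagic('R', 'E', 'V', '8')));
static_assert(AudioCore::CheckFeatureSupported(AudioCore::SupportTags::WaveBufferVer2,
                                               Common::MakeMagic('R', 'E', 'V', '8')));
static_assert(!AudioCore::CheckFeatureSupported(
    AudioCore::SupportTags::MultiTapBiquadFilterProcessing, Common::MakeMagic('R', 'E', 'V', '8')));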

View file

@ -0,0 +1,35 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_types.h"
namespace AudioCore {
struct WaveBufferVersion1 {
CpuAddr buffer;
u64 buffer_size;
u32 start_offset;
u32 end_offset;
bool loop;
bool stream_ended;
CpuAddr context;
u64 context_size;
};
struct WaveBufferVersion2 {
CpuAddr buffer;
CpuAddr context;
u64 buffer_size;
u64 context_size;
u32 start_offset;
u32 end_offset;
u32 loop_start_offset;
u32 loop_end_offset;
s32 loop_count;
bool loop;
bool stream_ended;
};
} // namespace AudioCore

View file

@ -0,0 +1,100 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "common/alignment.h"
#include "common/assert.h"
#include "common/common_types.h"
namespace AudioCore {
/**
* Responsible for splitting a workbuffer into multiple pieces.
* Takes in a buffer and size (it does not own them), and hands out aligned sub-allocations from
* that buffer via Allocate.
*/
class WorkbufferAllocator {
public:
explicit WorkbufferAllocator(std::span<u8> buffer_, u64 size_)
: buffer{reinterpret_cast<u64>(buffer_.data())}, size{size_} {}
/**
* Allocate the given count of T elements, aligned to alignment.
*
* @param count - The number of elements to allocate.
* @param alignment - The required starting alignment.
* @return Non-owning container of allocated elements.
*/
template <typename T>
std::span<T> Allocate(u64 count, u64 alignment) {
u64 out{0};
u64 byte_size{count * sizeof(T)};
if (byte_size > 0) {
auto current{buffer + offset};
auto aligned_buffer{Common::AlignUp(current, alignment)};
if (aligned_buffer + byte_size <= buffer + size) {
out = aligned_buffer;
offset = byte_size - buffer + aligned_buffer;
} else {
LOG_ERROR(
Service_Audio,
"Allocated buffer was too small to hold new alloc.\nAllocator size={:08X}, "
"offset={:08X}.\nAttempting to allocate {:08X} with alignment={:02X}",
size, offset, byte_size, alignment);
count = 0;
}
}
return std::span<T>(reinterpret_cast<T*>(out), count);
}
/**
* Align the current offset to the given alignment.
*
* @param alignment - The required starting alignment.
*/
void Align(u64 alignment) {
auto current{buffer + offset};
auto aligned_buffer{Common::AlignUp(current, alignment)};
offset = 0 - buffer + aligned_buffer;
}
/**
* Get the current buffer offset.
*
* @return The current allocating offset.
*/
u64 GetCurrentOffset() const {
return offset;
}
/**
* Get the current buffer size.
*
* @return The size of the current buffer.
*/
u64 GetSize() const {
return size;
}
/**
* Get the remaining size that can be allocated.
*
* @return The remaining size left in the buffer.
*/
u64 GetRemainingSize() const {
return size - offset;
}
private:
/// The buffer into which we are allocating.
u64 buffer;
/// Size of the buffer we're allocating to.
u64 size;
/// Current offset into the buffer; an error is logged if an allocation would exceed size.
u64 offset{};
};
} // namespace AudioCore
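// Illustrative usage sketch (hypothetical addition, not part of the original file): carving one
// backing workbuffer into aligned sub-allocations. The local array stands in for the real
// workbuffer, which normally comes from the transferred work memory.
inline void ExampleWorkbufferAllocation() {
    alignas(0x40) u8 backing[0x1000]{};
    AudioCore::WorkbufferAllocator allocator(backing, sizeof(backing));
    // 240 s32 depop samples aligned to 0x40; an empty span is returned if it does not fit.
    const std::span<s32> depop = allocator.Allocate<s32>(240, 0x40);
    const std::span<u8> scratch = allocator.Allocate<u8>(0x100, 0x10);
    const u64 remaining = allocator.GetRemainingSize(); // size - offset, only ever shrinks
    (void)depop;
    (void)scratch;
    (void)remaining;
}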

View file

@ -0,0 +1,21 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_types.h"
namespace AudioCore {
struct AudioBuffer {
/// Timestamp this buffer completed playing.
s64 played_timestamp;
/// Game memory address for these samples.
VAddr samples;
/// Unique identifier for this buffer.
u64 tag;
/// Size of the samples buffer.
u64 size;
};
} // namespace AudioCore

View file

@ -0,0 +1,304 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <mutex>
#include <span>
#include <vector>
#include "audio_buffer.h"
#include "audio_core/device/device_session.h"
#include "core/core_timing.h"
namespace AudioCore {
constexpr s32 BufferAppendLimit = 4;
/**
* A ringbuffer of N audio buffers.
* The buffer contains 3 sections:
* Appended - Buffers added to the ring, but have yet to be sent to the audio backend.
* Registered - Buffers sent to the backend and queued for playback.
* Released - Buffers which have been played, and can now be recycled.
* Any others are free/untracked.
*
* @tparam N - Maximum number of buffers in the ring.
*/
template <size_t N>
class AudioBuffers {
public:
explicit AudioBuffers(size_t limit) : append_limit{static_cast<u32>(limit)} {}
/**
* Append a new audio buffer to the ring.
*
* @param buffer - The new buffer.
*/
void AppendBuffer(AudioBuffer& buffer) {
std::scoped_lock l{lock};
buffers[appended_index] = buffer;
appended_count++;
appended_index = (appended_index + 1) % append_limit;
}
/**
* Register waiting buffers, up to a maximum of BufferAppendLimit.
*
* @param out_buffers - The buffers which were registered.
*/
void RegisterBuffers(std::vector<AudioBuffer>& out_buffers) {
std::scoped_lock l{lock};
const s32 to_register{std::min(std::min(appended_count, BufferAppendLimit),
BufferAppendLimit - registered_count)};
for (s32 i = 0; i < to_register; i++) {
s32 index{appended_index - appended_count};
if (index < 0) {
index += N;
}
out_buffers.push_back(buffers[index]);
registered_count++;
registered_index = (registered_index + 1) % append_limit;
appended_count--;
if (appended_count == 0) {
break;
}
}
}
/**
* Release a single buffer. Must be already registered.
*
* @param index - The buffer index to release.
* @param timestamp - The released timestamp for this buffer.
*/
void ReleaseBuffer(s32 index, s64 timestamp) {
std::scoped_lock l{lock};
buffers[index].played_timestamp = timestamp;
registered_count--;
released_count++;
released_index = (released_index + 1) % append_limit;
}
/**
* Release all registered buffers.
*
* @param core_timing - Core timing, used to timestamp each released buffer.
* @param session - Device session used to check whether each buffer has been consumed.
* @return True if a buffer was released or no registered buffers remain, otherwise false.
*/
bool ReleaseBuffers(Core::Timing::CoreTiming& core_timing, DeviceSession& session) {
std::scoped_lock l{lock};
bool buffer_released{false};
while (registered_count > 0) {
auto index{registered_index - registered_count};
if (index < 0) {
index += N;
}
// Check with the backend if this buffer can be released yet.
if (!session.IsBufferConsumed(buffers[index].tag)) {
break;
}
ReleaseBuffer(index, core_timing.GetGlobalTimeNs().count());
buffer_released = true;
}
return buffer_released || registered_count == 0;
}
/**
* Get all released buffers.
*
* @param tags - Container to be filled with the released buffers' tags.
* @return The number of buffers released.
*/
u32 GetReleasedBuffers(std::span<u64> tags) {
std::scoped_lock l{lock};
u32 released{0};
while (released_count > 0) {
auto index{released_index - released_count};
if (index < 0) {
index += N;
}
auto& buffer{buffers[index]};
released_count--;
auto tag{buffer.tag};
buffer.played_timestamp = 0;
buffer.samples = 0;
buffer.tag = 0;
buffer.size = 0;
if (tag == 0) {
break;
}
tags[released++] = tag;
if (released >= tags.size()) {
break;
}
}
return released;
}
/**
* Get all appended and registered buffers.
*
* @param buffers_flushed - Output vector for the buffers which are released.
* @param max_buffers - Maximum number of buffers to release.
* @return The number of buffers released.
*/
u32 GetRegisteredAppendedBuffers(std::vector<AudioBuffer>& buffers_flushed, u32 max_buffers) {
std::scoped_lock l{lock};
if (registered_count + appended_count == 0) {
return 0;
}
size_t buffers_to_flush{
std::min(static_cast<u32>(registered_count + appended_count), max_buffers)};
if (buffers_to_flush == 0) {
return 0;
}
while (registered_count > 0) {
auto index{registered_index - registered_count};
if (index < 0) {
index += N;
}
buffers_flushed.push_back(buffers[index]);
registered_count--;
released_count++;
released_index = (released_index + 1) % append_limit;
if (buffers_flushed.size() >= buffers_to_flush) {
break;
}
}
while (appended_count > 0) {
auto index{appended_index - appended_count};
if (index < 0) {
index += N;
}
buffers_flushed.push_back(buffers[index]);
appended_count--;
released_count++;
released_index = (released_index + 1) % append_limit;
if (buffers_flushed.size() >= buffers_to_flush) {
break;
}
}
return static_cast<u32>(buffers_flushed.size());
}
/**
* Check if the given tag is in the buffers.
*
* @param tag - Unique tag of the buffer to search for.
* @return True if the buffer is still in the ring, otherwise false.
*/
bool ContainsBuffer(const u64 tag) const {
std::scoped_lock l{lock};
const auto registered_buffers{appended_count + registered_count + released_count};
if (registered_buffers == 0) {
return false;
}
auto index{released_index - released_count};
if (index < 0) {
index += append_limit;
}
for (s32 i = 0; i < registered_buffers; i++) {
if (buffers[index].tag == tag) {
return true;
}
index = (index + 1) % append_limit;
}
return false;
}
/**
* Get the number of appended and registered buffers in the ring.
* That is, buffers waiting to be sent to the backend plus buffers currently queued there.
*
* @return Number of appended and registered buffers.
*/
u32 GetAppendedRegisteredCount() const {
std::scoped_lock l{lock};
return appended_count + registered_count;
}
/**
* Get the total number of active buffers in the ring.
* That is, appended, registered and released buffers.
*
* @return Number of active buffers.
*/
u32 GetTotalBufferCount() const {
std::scoped_lock l{lock};
return static_cast<u32>(appended_count + registered_count + released_count);
}
/**
* Flush all of the currently appended and registered buffers
*
* @param buffers_released - Output count for the number of buffers released.
* @return True if buffers were successfully flushed, otherwise false.
*/
bool FlushBuffers(u32& buffers_released) {
std::scoped_lock l{lock};
std::vector<AudioBuffer> buffers_flushed{};
buffers_released = GetRegisteredAppendedBuffers(buffers_flushed, append_limit);
if (registered_count > 0) {
return false;
}
if (static_cast<u32>(released_count + appended_count) > append_limit) {
return false;
}
return true;
}
private:
/// Buffer lock
mutable std::recursive_mutex lock{};
/// The audio buffers
std::array<AudioBuffer, N> buffers{};
/// Current released index
s32 released_index{};
/// Number of released buffers
s32 released_count{};
/// Current registered index
s32 registered_index{};
/// Number of registered buffers
s32 registered_count{};
/// Current appended index
s32 appended_index{};
/// Number of appended buffers
s32 appended_count{};
/// Maximum number of buffers (default 32)
u32 append_limit{};
};
} // namespace AudioCore
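// Illustrative usage sketch (hypothetical addition, not part of the original file): the
// append -> register -> release walk through the ring described above. The tag value is
// arbitrary here; real callers take it from the game's appended buffer.
inline void ExampleAudioBufferRing() {
    AudioCore::AudioBuffers<AudioCore::BufferCount> ring{AudioCore::BufferCount};
    AudioCore::AudioBuffer buffer{.played_timestamp = 0, .samples = 0, .tag = 1, .size = 0x1000};
    ring.AppendBuffer(buffer); // section 1: appended, not yet with the backend
    std::vector<AudioCore::AudioBuffer> to_send{};
    ring.RegisterBuffers(to_send); // section 2: registered, handed to the backend
    // ... the backend consumes to_send[0]; ReleaseBuffers() then moves it to released ...
    std::array<u64, AudioCore::BufferCount> tags{};
    const u32 released = ring.GetReleasedBuffers(tags); // section 3: tags recycled to the game
    (void)released;
}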

View file

@ -0,0 +1,111 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_core.h"
#include "audio_core/audio_manager.h"
#include "audio_core/device/audio_buffer.h"
#include "audio_core/device/device_session.h"
#include "audio_core/sink/sink_stream.h"
#include "core/core.h"
#include "core/memory.h"
namespace AudioCore {
DeviceSession::DeviceSession(Core::System& system_) : system{system_} {}
DeviceSession::~DeviceSession() {
Finalize();
}
Result DeviceSession::Initialize(std::string_view name_, SampleFormat sample_format_,
u16 channel_count_, size_t session_id_, u32 handle_,
u64 applet_resource_user_id_, Sink::StreamType type_) {
if (stream) {
Finalize();
}
name = fmt::format("{}-{}", name_, session_id_);
type = type_;
sample_format = sample_format_;
channel_count = channel_count_;
session_id = session_id_;
handle = handle_;
applet_resource_user_id = applet_resource_user_id_;
if (type == Sink::StreamType::In) {
sink = &system.AudioCore().GetInputSink();
} else {
sink = &system.AudioCore().GetOutputSink();
}
stream = sink->AcquireSinkStream(system, channel_count, name, type);
return ResultSuccess;
}
void DeviceSession::Finalize() {
Stop();
sink->CloseStream(stream);
stream = nullptr;
}
void DeviceSession::Start() {
stream->SetPlayedSampleCount(played_sample_count);
stream->Start();
}
void DeviceSession::Stop() {
if (stream) {
played_sample_count = stream->GetPlayedSampleCount();
stream->Stop();
}
}
void DeviceSession::AppendBuffers(std::span<AudioBuffer> buffers) const {
auto& memory{system.Memory()};
for (size_t i = 0; i < buffers.size(); i++) {
Sink::SinkBuffer new_buffer{
.frames = buffers[i].size / (channel_count * sizeof(s16)),
.frames_played = 0,
.tag = buffers[i].tag,
.consumed = false,
};
if (type == Sink::StreamType::In) {
std::vector<s16> samples{};
stream->AppendBuffer(new_buffer, samples);
} else {
std::vector<s16> samples(buffers[i].size / sizeof(s16));
memory.ReadBlockUnsafe(buffers[i].samples, samples.data(), buffers[i].size);
stream->AppendBuffer(new_buffer, samples);
}
}
}
void DeviceSession::ReleaseBuffer(AudioBuffer& buffer) const {
if (type == Sink::StreamType::In) {
auto& memory{system.Memory()};
auto samples{stream->ReleaseBuffer(buffer.size / sizeof(s16))};
memory.WriteBlockUnsafe(buffer.samples, samples.data(), buffer.size);
}
}
bool DeviceSession::IsBufferConsumed(u64 tag) const {
if (stream) {
return stream->IsBufferConsumed(tag);
}
return true;
}
void DeviceSession::SetVolume(f32 volume) const {
if (stream) {
stream->SetSystemVolume(volume);
}
}
u64 DeviceSession::GetPlayedSampleCount() const {
if (stream) {
return stream->GetPlayedSampleCount();
}
return 0;
}
} // namespace AudioCore
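AppendBuffers above derives each sink buffer's frame count from the guest buffer's byte size, where one frame is one s16 sample per channel. A small sketch of that arithmetic; FramesFromBytes is a hypothetical helper written only for illustration:

#include <cstddef>
#include <cstdint>

// One PcmInt16 frame is channel_count samples of sizeof(s16) == 2 bytes each.
constexpr std::size_t FramesFromBytes(std::size_t size_bytes, std::size_t channel_count) {
    return size_bytes / (channel_count * sizeof(std::int16_t));
}

// 1920 bytes of stereo s16 is 480 frames, i.e. 10ms of audio at 48kHz.
static_assert(FramesFromBytes(1920, 2) == 480);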

View file

@@ -0,0 +1,124 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/common/common.h"
#include "audio_core/sink/sink.h"
#include "core/hle/service/audio/errors.h"
namespace Core {
class System;
}
namespace AudioCore {
namespace Sink {
class SinkStream;
struct SinkBuffer;
} // namespace Sink
struct AudioBuffer;
/**
* Represents an input or output device stream for audio in and audio out (not used for render).
**/
class DeviceSession {
public:
explicit DeviceSession(Core::System& system);
~DeviceSession();
/**
* Initialize this device session.
*
* @param name - Name of this device.
* @param sample_format - Sample format for this device's output.
* @param channel_count - Number of channels for this device (2 or 6).
* @param session_id - This session's id.
* @param handle - Handle for this device session (unused).
* @param applet_resource_user_id - Applet resource user id for this device session (unused).
* @param type - Type of this stream (Render, In, Out).
* @return Result code for this call.
*/
Result Initialize(std::string_view name, SampleFormat sample_format, u16 channel_count,
size_t session_id, u32 handle, u64 applet_resource_user_id,
Sink::StreamType type);
/**
* Finalize this device session.
*/
void Finalize();
/**
* Append audio buffers to this device session to be played back.
*
* @param buffers - The buffers to play.
*/
void AppendBuffers(std::span<AudioBuffer> buffers) const;
/**
* (Audio In only) Pop samples from the backend, and write them back to this buffer's address.
*
* @param buffer - The buffer to write to.
*/
void ReleaseBuffer(AudioBuffer& buffer) const;
/**
* Check if the buffer for the given tag has been consumed by the backend.
*
* @param tag - Unique tag of the buffer to check.
* @return true if the buffer has been consumed, otherwise false.
*/
bool IsBufferConsumed(u64 tag) const;
/**
* Start this device session, starting the backend stream.
*/
void Start();
/**
* Stop this device session, stopping the backend stream.
*/
void Stop();
/**
* Set this device session's volume.
*
* @param volume - New volume for this session.
*/
void SetVolume(f32 volume) const;
/**
* Get this device session's total played sample count.
*
* @return Samples played by this session.
*/
u64 GetPlayedSampleCount() const;
private:
/// System
Core::System& system;
/// Output sink this device will use
Sink::Sink* sink;
/// The backend stream for this device session to send samples to
Sink::SinkStream* stream{};
/// Name of this device session
std::string name{};
/// Type of this device session (render/in/out)
Sink::StreamType type{};
/// Sample format for this device.
SampleFormat sample_format{SampleFormat::PcmInt16};
/// Channel count for this device session
u16 channel_count{};
/// Session id of this device session
size_t session_id{};
/// Handle of this device session
u32 handle{};
/// Applet resource user id of this device session
u64 applet_resource_user_id{};
/// Total number of samples played by this device session
u64 played_sample_count{};
};
} // namespace AudioCore
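Start() and Stop() above carry the played-sample count across stop/start cycles: Stop() reads the running total back from the backend stream, and Start() seeds it into the (possibly re-acquired) stream. A self-contained sketch of that hand-off, with a hypothetical FakeStream standing in for Sink::SinkStream:

#include <cstdint>

// Stand-in for the backend stream's played-sample counter.
struct FakeStream {
    std::uint64_t played{};
    void SetPlayedSampleCount(std::uint64_t count) { played = count; }
    std::uint64_t GetPlayedSampleCount() const { return played; }
};

// Mirrors the carry-over done by DeviceSession::Start()/Stop().
class PlayedSampleCarry {
public:
    void Start(FakeStream& stream) { stream.SetPlayedSampleCount(saved); }
    void Stop(const FakeStream& stream) { saved = stream.GetPlayedSampleCount(); }

private:
    std::uint64_t saved{};
};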

100
src/audio_core/in/audio_in.cpp Executable file
View file

@@ -0,0 +1,100 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_in_manager.h"
#include "audio_core/in/audio_in.h"
#include "core/hle/kernel/k_event.h"
namespace AudioCore::AudioIn {
In::In(Core::System& system_, Manager& manager_, Kernel::KEvent* event_, size_t session_id_)
: manager{manager_}, parent_mutex{manager.mutex}, event{event_}, system{system_, event,
session_id_} {}
void In::Free() {
std::scoped_lock l{parent_mutex};
manager.ReleaseSessionId(system.GetSessionId());
}
System& In::GetSystem() {
return system;
}
AudioIn::State In::GetState() {
std::scoped_lock l{parent_mutex};
return system.GetState();
}
Result In::StartSystem() {
std::scoped_lock l{parent_mutex};
return system.Start();
}
void In::StartSession() {
std::scoped_lock l{parent_mutex};
system.StartSession();
}
Result In::StopSystem() {
std::scoped_lock l{parent_mutex};
return system.Stop();
}
Result In::AppendBuffer(const AudioInBuffer& buffer, u64 tag) {
std::scoped_lock l{parent_mutex};
if (system.AppendBuffer(buffer, tag)) {
return ResultSuccess;
}
return Service::Audio::ERR_BUFFER_COUNT_EXCEEDED;
}
void In::ReleaseAndRegisterBuffers() {
std::scoped_lock l{parent_mutex};
if (system.GetState() == State::Started) {
system.ReleaseBuffers();
system.RegisterBuffers();
}
}
bool In::FlushAudioInBuffers() {
std::scoped_lock l{parent_mutex};
return system.FlushAudioInBuffers();
}
u32 In::GetReleasedBuffers(std::span<u64> tags) {
std::scoped_lock l{parent_mutex};
return system.GetReleasedBuffers(tags);
}
Kernel::KReadableEvent& In::GetBufferEvent() {
std::scoped_lock l{parent_mutex};
return event->GetReadableEvent();
}
f32 In::GetVolume() {
std::scoped_lock l{parent_mutex};
return system.GetVolume();
}
void In::SetVolume(f32 volume) {
std::scoped_lock l{parent_mutex};
system.SetVolume(volume);
}
bool In::ContainsAudioBuffer(u64 tag) {
std::scoped_lock l{parent_mutex};
return system.ContainsAudioBuffer(tag);
}
u32 In::GetBufferCount() {
std::scoped_lock l{parent_mutex};
return system.GetBufferCount();
}
u64 In::GetPlayedSampleCount() {
std::scoped_lock l{parent_mutex};
return system.GetPlayedSampleCount();
}
} // namespace AudioCore::AudioIn
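Every method of In above follows the same shape: take the manager's recursive mutex, then forward to the wrapped System. A minimal, self-contained sketch of that locking/forwarding pattern, with a trivial Counter standing in for the system object:

#include <mutex>

// Stand-in for the wrapped system object.
class Counter {
public:
    void Add() { ++value; }
    int Get() const { return value; }

private:
    int value{};
};

// Every public call locks the shared parent mutex, then forwards.
class LockedCounter {
public:
    explicit LockedCounter(std::recursive_mutex& parent) : parent_mutex{parent} {}

    void Add() {
        std::scoped_lock l{parent_mutex};
        counter.Add();
    }
    int Get() {
        std::scoped_lock l{parent_mutex};
        return counter.Get();
    }

private:
    std::recursive_mutex& parent_mutex; // shared with the owning manager
    Counter counter;                    // owned, like In::system
};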

147
src/audio_core/in/audio_in.h Executable file
View file

@@ -0,0 +1,147 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <mutex>
#include "audio_core/in/audio_in_system.h"
namespace Core {
class System;
}
namespace Kernel {
class KEvent;
class KReadableEvent;
} // namespace Kernel
namespace AudioCore::AudioIn {
class Manager;
/**
* Interface between the service and audio in system. Mainly responsible for forwarding service
* calls to the system.
*/
class In {
public:
explicit In(Core::System& system, Manager& manager, Kernel::KEvent* event, size_t session_id);
/**
* Free this audio in from the audio in manager.
*/
void Free();
/**
* Get this audio in's system.
*/
System& GetSystem();
/**
* Get the current state.
*
* @return Started or Stopped.
*/
AudioIn::State GetState();
/**
* Start the system
*
* @return Result code
*/
Result StartSystem();
/**
* Start the system's device session.
*/
void StartSession();
/**
* Stop the system.
*
* @return Result code
*/
Result StopSystem();
/**
* Append a new buffer to the system; the buffer event will be signalled when it is filled.
*
* @param buffer - The new buffer to append.
* @param tag - Unique tag for this buffer.
* @return Result code.
*/
Result AppendBuffer(const AudioInBuffer& buffer, u64 tag);
/**
* Release all completed buffers, and register any appended.
*/
void ReleaseAndRegisterBuffers();
/**
* Flush all buffers.
*/
bool FlushAudioInBuffers();
/**
* Get all of the currently released buffers.
*
* @param tags - Output container for the buffer tags which were released.
* @return The number of buffers released.
*/
u32 GetReleasedBuffers(std::span<u64> tags);
/**
* Get the buffer event for this audio in; this event will be signalled when a buffer is filled.
*
* @return The buffer event.
*/
Kernel::KReadableEvent& GetBufferEvent();
/**
* Get the current system volume.
*
* @return The current volume.
*/
f32 GetVolume();
/**
* Set the system volume.
*
* @param volume - The volume to set.
*/
void SetVolume(f32 volume);
/**
* Check if a buffer is in the system.
*
* @param tag - The tag to search for.
* @return True if the buffer is in the system, otherwise false.
*/
bool ContainsAudioBuffer(u64 tag);
/**
* Get the maximum number of buffers.
*
* @return The maximum number of buffers.
*/
u32 GetBufferCount();
/**
* Get the total played sample count for this audio in.
*
* @return The played sample count.
*/
u64 GetPlayedSampleCount();
private:
/// The AudioIn::Manager this audio in is registered with
Manager& manager;
/// Manager's mutex
std::recursive_mutex& parent_mutex;
/// Buffer event, signalled when buffers are ready to be released
Kernel::KEvent* event;
/// Main audio in system
System system;
};
} // namespace AudioCore::AudioIn

View file

@@ -0,0 +1,213 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <mutex>
#include "audio_core/audio_event.h"
#include "audio_core/audio_manager.h"
#include "audio_core/in/audio_in_system.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_event.h"
namespace AudioCore::AudioIn {
System::System(Core::System& system_, Kernel::KEvent* event_, const size_t session_id_)
: system{system_}, buffer_event{event_},
session_id{session_id_}, session{std::make_unique<DeviceSession>(system_)} {}
System::~System() {
Finalize();
}
void System::Finalize() {
Stop();
session->Finalize();
buffer_event->GetWritableEvent().Signal();
}
void System::StartSession() {
session->Start();
}
size_t System::GetSessionId() const {
return session_id;
}
std::string_view System::GetDefaultDeviceName() {
return "BuiltInHeadset";
}
std::string_view System::GetDefaultUacDeviceName() {
return "Uac";
}
Result System::IsConfigValid(const std::string_view device_name,
const AudioInParameter& in_params) {
if ((device_name.size() > 0) &&
(device_name != GetDefaultDeviceName() && device_name != GetDefaultUacDeviceName())) {
return Service::Audio::ERR_INVALID_DEVICE_NAME;
}
if (in_params.sample_rate != TargetSampleRate && in_params.sample_rate > 0) {
return Service::Audio::ERR_INVALID_SAMPLE_RATE;
}
return ResultSuccess;
}
Result System::Initialize(std::string& device_name, const AudioInParameter& in_params,
const u32 handle_, const u64 applet_resource_user_id_) {
auto result{IsConfigValid(device_name, in_params)};
if (result.IsError()) {
return result;
}
handle = handle_;
applet_resource_user_id = applet_resource_user_id_;
if (device_name.empty() || device_name[0] == '\0') {
name = std::string(GetDefaultDeviceName());
} else {
name = std::move(device_name);
}
sample_rate = TargetSampleRate;
sample_format = SampleFormat::PcmInt16;
channel_count = in_params.channel_count <= 2 ? 2 : 6;
volume = 1.0f;
is_uac = name == "Uac";
return ResultSuccess;
}
Result System::Start() {
if (state != State::Stopped) {
return Service::Audio::ERR_OPERATION_FAILED;
}
session->Initialize(name, sample_format, channel_count, session_id, handle,
applet_resource_user_id, Sink::StreamType::In);
session->SetVolume(volume);
session->Start();
state = State::Started;
std::vector<AudioBuffer> buffers_to_flush{};
buffers.RegisterBuffers(buffers_to_flush);
session->AppendBuffers(buffers_to_flush);
return ResultSuccess;
}
Result System::Stop() {
if (state == State::Started) {
session->Stop();
session->SetVolume(0.0f);
state = State::Stopped;
}
return ResultSuccess;
}
bool System::AppendBuffer(const AudioInBuffer& buffer, const u64 tag) {
if (buffers.GetTotalBufferCount() == BufferCount) {
return false;
}
AudioBuffer new_buffer{
.played_timestamp = 0, .samples = buffer.samples, .tag = tag, .size = buffer.size};
buffers.AppendBuffer(new_buffer);
RegisterBuffers();
return true;
}
void System::RegisterBuffers() {
if (state == State::Started) {
std::vector<AudioBuffer> registered_buffers{};
buffers.RegisterBuffers(registered_buffers);
session->AppendBuffers(registered_buffers);
}
}
void System::ReleaseBuffers() {
bool signal{buffers.ReleaseBuffers(system.CoreTiming(), *session)};
if (signal) {
// Signal if any buffer was released, or if none are registered, we need more.
buffer_event->GetWritableEvent().Signal();
}
}
u32 System::GetReleasedBuffers(std::span<u64> tags) {
return buffers.GetReleasedBuffers(tags);
}
bool System::FlushAudioInBuffers() {
if (state != State::Started) {
return false;
}
u32 buffers_released{};
buffers.FlushBuffers(buffers_released);
if (buffers_released > 0) {
buffer_event->GetWritableEvent().Signal();
}
return true;
}
u16 System::GetChannelCount() const {
return channel_count;
}
u32 System::GetSampleRate() const {
return sample_rate;
}
SampleFormat System::GetSampleFormat() const {
return sample_format;
}
State System::GetState() {
switch (state) {
case State::Started:
case State::Stopped:
return state;
default:
LOG_ERROR(Service_Audio, "AudioIn invalid state!");
state = State::Stopped;
break;
}
return state;
}
std::string System::GetName() const {
return name;
}
f32 System::GetVolume() const {
return volume;
}
void System::SetVolume(const f32 volume_) {
volume = volume_;
session->SetVolume(volume_);
}
bool System::ContainsAudioBuffer(const u64 tag) {
return buffers.ContainsBuffer(tag);
}
u32 System::GetBufferCount() {
return buffers.GetAppendedRegisteredCount();
}
u64 System::GetPlayedSampleCount() const {
return session->GetPlayedSampleCount();
}
bool System::IsUac() const {
return is_uac;
}
} // namespace AudioCore::AudioIn
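IsConfigValid above accepts an empty device name or one of the two defaults, and a sample rate that is either unspecified (zero or negative) or the target rate. A compact sketch of those rules as a free function, assuming TargetSampleRate is 48000:

#include <string_view>

constexpr bool IsAudioInConfigValid(std::string_view device_name, int sample_rate) {
    const bool name_ok =
        device_name.empty() || device_name == "BuiltInHeadset" || device_name == "Uac";
    const bool rate_ok = sample_rate <= 0 || sample_rate == 48000; // assumed TargetSampleRate
    return name_ok && rate_ok;
}

static_assert(IsAudioInConfigValid("", 48000));
static_assert(!IsAudioInConfigValid("SomeOtherDevice", 48000));
static_assert(!IsAudioInConfigValid("BuiltInHeadset", 44100));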

View file

@@ -0,0 +1,275 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include <memory>
#include <span>
#include <string>
#include "audio_core/common/common.h"
#include "audio_core/device/audio_buffers.h"
#include "audio_core/device/device_session.h"
#include "core/hle/service/audio/errors.h"
namespace Core {
class System;
}
namespace Kernel {
class KEvent;
}
namespace AudioCore::AudioIn {
constexpr SessionTypes SessionType = SessionTypes::AudioIn;
struct AudioInParameter {
/* 0x0 */ s32_le sample_rate;
/* 0x4 */ u16_le channel_count;
/* 0x6 */ u16_le reserved;
};
static_assert(sizeof(AudioInParameter) == 0x8, "AudioInParameter is an invalid size");
struct AudioInParameterInternal {
/* 0x0 */ u32_le sample_rate;
/* 0x4 */ u32_le channel_count;
/* 0x8 */ u32_le sample_format;
/* 0xC */ u32_le state;
};
static_assert(sizeof(AudioInParameterInternal) == 0x10,
"AudioInParameterInternal is an invalid size");
struct AudioInBuffer {
/* 0x00 */ AudioInBuffer* next;
/* 0x08 */ VAddr samples;
/* 0x10 */ u64 capacity;
/* 0x18 */ u64 size;
/* 0x20 */ u64 offset;
};
static_assert(sizeof(AudioInBuffer) == 0x28, "AudioInBuffer is an invalid size");
enum class State {
Started,
Stopped,
};
/**
* Controls and drives audio input.
*/
class System {
public:
explicit System(Core::System& system, Kernel::KEvent* event, size_t session_id);
~System();
/**
* Get the default audio input device name.
*
* @return The default audio input device name.
*/
std::string_view GetDefaultDeviceName();
/**
* Get the default USB audio input device name.
* This is preferred over non-USB as some games refuse to work with the BuiltInHeadset
* (e.g. Let's Sing).
*
* @return The default USB audio input device name.
*/
std::string_view GetDefaultUacDeviceName();
/**
* Is the given initialize config valid?
*
* @param device_name - The name of the requested input device.
* @param in_params - Input parameters, see AudioInParameter.
* @return Result code.
*/
Result IsConfigValid(std::string_view device_name, const AudioInParameter& in_params);
/**
* Initialize this system.
*
* @param device_name - The name of the requested input device.
* @param in_params - Input parameters, see AudioInParameter.
* @param handle - Unused.
* @param applet_resource_user_id - Unused.
* @return Result code.
*/
Result Initialize(std::string& device_name, const AudioInParameter& in_params, u32 handle,
u64 applet_resource_user_id);
/**
* Start this system.
*
* @return Result code.
*/
Result Start();
/**
* Stop this system.
*
* @return Result code.
*/
Result Stop();
/**
* Finalize this system.
*/
void Finalize();
/**
* Start this system's device session.
*/
void StartSession();
/**
* Get this system's id.
*/
size_t GetSessionId() const;
/**
* Append a new buffer to the device.
*
* @param buffer - New buffer to append.
* @param tag - Unique tag of the buffer.
* @return True if the buffer was appended, otherwise false.
*/
bool AppendBuffer(const AudioInBuffer& buffer, u64 tag);
/**
* Register all appended buffers.
*/
void RegisterBuffers();
/**
* Release all registered buffers.
*/
void ReleaseBuffers();
/**
* Get all released buffers.
*
* @param tags - Container to be filled with the released buffers' tags.
* @return The number of buffers released.
*/
u32 GetReleasedBuffers(std::span<u64> tags);
/**
* Flush all appended and registered buffers.
*
* @return True if buffers were successfully flushed, otherwise false.
*/
bool FlushAudioInBuffers();
/**
* Get this system's current channel count.
*
* @return The channel count.
*/
u16 GetChannelCount() const;
/**
* Get this system's current sample rate.
*
* @return The sample rate.
*/
u32 GetSampleRate() const;
/**
* Get this system's current sample format.
*
* @return The sample format.
*/
SampleFormat GetSampleFormat() const;
/**
* Get this system's current state.
*
* @return The current state.
*/
State GetState();
/**
* Get this system's name.
*
* @return The system's name.
*/
std::string GetName() const;
/**
* Get this system's current volume.
*
* @return The system's current volume.
*/
f32 GetVolume() const;
/**
* Set this system's current volume.
*
* @param volume - The new volume.
*/
void SetVolume(f32 volume);
/**
* Does the system contain this buffer?
*
* @param tag - Unique tag to search for.
* @return True if the buffer is in the system, otherwise false.
*/
bool ContainsAudioBuffer(u64 tag);
/**
* Get the maximum number of usable buffers (default 32).
*
* @return The number of buffers.
*/
u32 GetBufferCount();
/**
* Get the total number of samples played by this system.
*
* @return The number of samples.
*/
u64 GetPlayedSampleCount() const;
/**
* Is this system using a USB device?
*
* @return True if using a USB device, otherwise false.
*/
bool IsUac() const;
private:
/// Core system
Core::System& system;
/// (Unused)
u32 handle{};
/// (Unused)
u64 applet_resource_user_id{};
/// Buffer event, signalled when a buffer is ready
Kernel::KEvent* buffer_event;
/// Session id of this system
size_t session_id{};
/// Device session for this system
std::unique_ptr<DeviceSession> session;
/// Audio buffers in use by this system
AudioBuffers<BufferCount> buffers{BufferCount};
/// Sample rate of this system
u32 sample_rate{};
/// Sample format of this system
SampleFormat sample_format{SampleFormat::PcmInt16};
/// Channel count of this system
u16 channel_count{};
/// State of this system
std::atomic<State> state{State::Stopped};
/// Name of this system
std::string name{};
/// Volume of this system
f32 volume{1.0f};
/// Is this system's device USB?
bool is_uac{false};
};
} // namespace AudioCore::AudioIn

100
src/audio_core/out/audio_out.cpp Executable file
View file

@@ -0,0 +1,100 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_out_manager.h"
#include "audio_core/out/audio_out.h"
#include "core/hle/kernel/k_event.h"
namespace AudioCore::AudioOut {
Out::Out(Core::System& system_, Manager& manager_, Kernel::KEvent* event_, size_t session_id_)
: manager{manager_}, parent_mutex{manager.mutex}, event{event_}, system{system_, event,
session_id_} {}
void Out::Free() {
std::scoped_lock l{parent_mutex};
manager.ReleaseSessionId(system.GetSessionId());
}
System& Out::GetSystem() {
return system;
}
AudioOut::State Out::GetState() {
std::scoped_lock l{parent_mutex};
return system.GetState();
}
Result Out::StartSystem() {
std::scoped_lock l{parent_mutex};
return system.Start();
}
void Out::StartSession() {
std::scoped_lock l{parent_mutex};
system.StartSession();
}
Result Out::StopSystem() {
std::scoped_lock l{parent_mutex};
return system.Stop();
}
Result Out::AppendBuffer(const AudioOutBuffer& buffer, const u64 tag) {
std::scoped_lock l{parent_mutex};
if (system.AppendBuffer(buffer, tag)) {
return ResultSuccess;
}
return Service::Audio::ERR_BUFFER_COUNT_EXCEEDED;
}
void Out::ReleaseAndRegisterBuffers() {
std::scoped_lock l{parent_mutex};
if (system.GetState() == State::Started) {
system.ReleaseBuffers();
system.RegisterBuffers();
}
}
bool Out::FlushAudioOutBuffers() {
std::scoped_lock l{parent_mutex};
return system.FlushAudioOutBuffers();
}
u32 Out::GetReleasedBuffers(std::span<u64> tags) {
std::scoped_lock l{parent_mutex};
return system.GetReleasedBuffers(tags);
}
Kernel::KReadableEvent& Out::GetBufferEvent() {
std::scoped_lock l{parent_mutex};
return event->GetReadableEvent();
}
f32 Out::GetVolume() {
std::scoped_lock l{parent_mutex};
return system.GetVolume();
}
void Out::SetVolume(const f32 volume) {
std::scoped_lock l{parent_mutex};
system.SetVolume(volume);
}
bool Out::ContainsAudioBuffer(const u64 tag) {
std::scoped_lock l{parent_mutex};
return system.ContainsAudioBuffer(tag);
}
u32 Out::GetBufferCount() {
std::scoped_lock l{parent_mutex};
return system.GetBufferCount();
}
u64 Out::GetPlayedSampleCount() {
std::scoped_lock l{parent_mutex};
return system.GetPlayedSampleCount();
}
} // namespace AudioCore::AudioOut

147
src/audio_core/out/audio_out.h Executable file
View file

@@ -0,0 +1,147 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <mutex>
#include "audio_core/out/audio_out_system.h"
namespace Core {
class System;
}
namespace Kernel {
class KEvent;
class KReadableEvent;
} // namespace Kernel
namespace AudioCore::AudioOut {
class Manager;
/**
* Interface between the service and audio out system. Mainly responsible for forwarding service
* calls to the system.
*/
class Out {
public:
explicit Out(Core::System& system, Manager& manager, Kernel::KEvent* event, size_t session_id);
/**
* Free this audio out from the audio out manager.
*/
void Free();
/**
* Get this audio out's system.
*/
System& GetSystem();
/**
* Get the current state.
*
* @return Started or Stopped.
*/
AudioOut::State GetState();
/**
* Start the system
*
* @return Result code
*/
Result StartSystem();
/**
* Start the system's device session.
*/
void StartSession();
/**
* Stop the system.
*
* @return Result code
*/
Result StopSystem();
/**
* Append a new buffer to the system; the buffer event will be signalled when it is filled.
*
* @param buffer - The new buffer to append.
* @param tag - Unique tag for this buffer.
* @return Result code.
*/
Result AppendBuffer(const AudioOutBuffer& buffer, u64 tag);
/**
* Release all completed buffers, and register any appended.
*/
void ReleaseAndRegisterBuffers();
/**
* Flush all buffers.
*/
bool FlushAudioOutBuffers();
/**
* Get all of the currently released buffers.
*
* @param tags - Output container for the buffer tags which were released.
* @return The number of buffers released.
*/
u32 GetReleasedBuffers(std::span<u64> tags);
/**
* Get the buffer event for this audio out; this event will be signalled when a buffer is
* filled.
* @return The buffer event.
*/
Kernel::KReadableEvent& GetBufferEvent();
/**
* Get the current system volume.
*
* @return The current volume.
*/
f32 GetVolume();
/**
* Set the system volume.
*
* @param volume - The volume to set.
*/
void SetVolume(f32 volume);
/**
* Check if a buffer is in the system.
*
* @param tag - The tag to search for.
* @return True if the buffer is in the system, otherwise false.
*/
bool ContainsAudioBuffer(u64 tag);
/**
* Get the maximum number of buffers.
*
* @return The maximum number of buffers.
*/
u32 GetBufferCount();
/**
* Get the total played sample count for this audio out.
*
* @return The played sample count.
*/
u64 GetPlayedSampleCount();
private:
/// The AudioOut::Manager this audio out is registered with
Manager& manager;
/// Manager's mutex
std::recursive_mutex& parent_mutex;
/// Buffer event, signalled when buffers are ready to be released
Kernel::KEvent* event;
/// Main audio out system
System system;
};
} // namespace AudioCore::AudioOut

View file

@@ -0,0 +1,207 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <mutex>
#include "audio_core/audio_event.h"
#include "audio_core/audio_manager.h"
#include "audio_core/out/audio_out_system.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/kernel/k_event.h"
namespace AudioCore::AudioOut {
System::System(Core::System& system_, Kernel::KEvent* event_, size_t session_id_)
: system{system_}, buffer_event{event_},
session_id{session_id_}, session{std::make_unique<DeviceSession>(system_)} {}
System::~System() {
Finalize();
}
void System::Finalize() {
Stop();
session->Finalize();
buffer_event->GetWritableEvent().Signal();
}
std::string_view System::GetDefaultOutputDeviceName() {
return "DeviceOut";
}
Result System::IsConfigValid(std::string_view device_name, const AudioOutParameter& in_params) {
if ((device_name.size() > 0) && (device_name != GetDefaultOutputDeviceName())) {
return Service::Audio::ERR_INVALID_DEVICE_NAME;
}
if (in_params.sample_rate != TargetSampleRate && in_params.sample_rate > 0) {
return Service::Audio::ERR_INVALID_SAMPLE_RATE;
}
if (in_params.channel_count == 0 || in_params.channel_count == 2 ||
in_params.channel_count == 6) {
return ResultSuccess;
}
return Service::Audio::ERR_INVALID_CHANNEL_COUNT;
}
Result System::Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle_,
u64& applet_resource_user_id_) {
auto result = IsConfigValid(device_name, in_params);
if (result.IsError()) {
return result;
}
handle = handle_;
applet_resource_user_id = applet_resource_user_id_;
if (device_name.empty() || device_name[0] == '\0') {
name = std::string(GetDefaultOutputDeviceName());
} else {
name = std::move(device_name);
}
sample_rate = TargetSampleRate;
sample_format = SampleFormat::PcmInt16;
channel_count = in_params.channel_count <= 2 ? 2 : 6;
volume = 1.0f;
return ResultSuccess;
}
void System::StartSession() {
session->Start();
}
size_t System::GetSessionId() const {
return session_id;
}
Result System::Start() {
if (state != State::Stopped) {
return Service::Audio::ERR_OPERATION_FAILED;
}
session->Initialize(name, sample_format, channel_count, session_id, handle,
applet_resource_user_id, Sink::StreamType::Out);
session->SetVolume(volume);
session->Start();
state = State::Started;
std::vector<AudioBuffer> buffers_to_flush{};
buffers.RegisterBuffers(buffers_to_flush);
session->AppendBuffers(buffers_to_flush);
return ResultSuccess;
}
Result System::Stop() {
if (state == State::Started) {
session->Stop();
session->SetVolume(0.0f);
state = State::Stopped;
}
return ResultSuccess;
}
bool System::AppendBuffer(const AudioOutBuffer& buffer, u64 tag) {
if (buffers.GetTotalBufferCount() == BufferCount) {
return false;
}
AudioBuffer new_buffer{
.played_timestamp = 0, .samples = buffer.samples, .tag = tag, .size = buffer.size};
buffers.AppendBuffer(new_buffer);
RegisterBuffers();
return true;
}
void System::RegisterBuffers() {
if (state == State::Started) {
std::vector<AudioBuffer> registered_buffers{};
buffers.RegisterBuffers(registered_buffers);
session->AppendBuffers(registered_buffers);
}
}
void System::ReleaseBuffers() {
bool signal{buffers.ReleaseBuffers(system.CoreTiming(), *session)};
if (signal) {
// Signal if any buffer was released, or if none are registered, we need more.
buffer_event->GetWritableEvent().Signal();
}
}
u32 System::GetReleasedBuffers(std::span<u64> tags) {
return buffers.GetReleasedBuffers(tags);
}
bool System::FlushAudioOutBuffers() {
if (state != State::Started) {
return false;
}
u32 buffers_released{};
buffers.FlushBuffers(buffers_released);
if (buffers_released > 0) {
buffer_event->GetWritableEvent().Signal();
}
return true;
}
u16 System::GetChannelCount() const {
return channel_count;
}
u32 System::GetSampleRate() const {
return sample_rate;
}
SampleFormat System::GetSampleFormat() const {
return sample_format;
}
State System::GetState() {
switch (state) {
case State::Started:
case State::Stopped:
return state;
default:
LOG_ERROR(Service_Audio, "AudioOut invalid state!");
state = State::Stopped;
break;
}
return state;
}
std::string System::GetName() const {
return name;
}
f32 System::GetVolume() const {
return volume;
}
void System::SetVolume(const f32 volume_) {
volume = volume_;
session->SetVolume(volume_);
}
bool System::ContainsAudioBuffer(const u64 tag) {
return buffers.ContainsBuffer(tag);
}
u32 System::GetBufferCount() {
return buffers.GetAppendedRegisteredCount();
}
u64 System::GetPlayedSampleCount() const {
return session->GetPlayedSampleCount();
}
} // namespace AudioCore::AudioOut
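IsConfigValid and Initialize above together give the channel policy: only 0, 2 or 6 requested channels are accepted, and the accepted value is then normalized to stereo or 6-channel output. A short sketch of that policy; the helper names are illustrative only:

constexpr bool IsChannelCountValid(int requested) {
    return requested == 0 || requested == 2 || requested == 6;
}

// Matches `channel_count = in_params.channel_count <= 2 ? 2 : 6` above.
constexpr int NormalizeChannelCount(int requested) {
    return requested <= 2 ? 2 : 6;
}

static_assert(IsChannelCountValid(0) && NormalizeChannelCount(0) == 2);
static_assert(IsChannelCountValid(6) && NormalizeChannelCount(6) == 6);
static_assert(!IsChannelCountValid(4));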

View file

@@ -0,0 +1,257 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <atomic>
#include <memory>
#include <span>
#include <string>
#include "audio_core/common/common.h"
#include "audio_core/device/audio_buffers.h"
#include "audio_core/device/device_session.h"
#include "core/hle/service/audio/errors.h"
namespace Core {
class System;
}
namespace Kernel {
class KEvent;
}
namespace AudioCore::AudioOut {
constexpr SessionTypes SessionType = SessionTypes::AudioOut;
struct AudioOutParameter {
/* 0x0 */ s32_le sample_rate;
/* 0x4 */ u16_le channel_count;
/* 0x6 */ u16_le reserved;
};
static_assert(sizeof(AudioOutParameter) == 0x8, "AudioOutParameter is an invalid size");
struct AudioOutParameterInternal {
/* 0x0 */ u32_le sample_rate;
/* 0x4 */ u32_le channel_count;
/* 0x8 */ u32_le sample_format;
/* 0xC */ u32_le state;
};
static_assert(sizeof(AudioOutParameterInternal) == 0x10,
"AudioOutParameterInternal is an invalid size");
struct AudioOutBuffer {
/* 0x00 */ AudioOutBuffer* next;
/* 0x08 */ VAddr samples;
/* 0x10 */ u64 capacity;
/* 0x18 */ u64 size;
/* 0x20 */ u64 offset;
};
static_assert(sizeof(AudioOutBuffer) == 0x28, "AudioOutBuffer is an invalid size");
enum class State {
Started,
Stopped,
};
/**
* Controls and drives audio output.
*/
class System {
public:
explicit System(Core::System& system, Kernel::KEvent* event, size_t session_id);
~System();
/**
* Get the default audio output device name.
*
* @return The default audio output device name.
*/
std::string_view GetDefaultOutputDeviceName();
/**
* Is the given initialize config valid?
*
* @param device_name - The name of the requested output device.
* @param in_params - Input parameters, see AudioOutParameter.
* @return Result code.
*/
Result IsConfigValid(std::string_view device_name, const AudioOutParameter& in_params);
/**
* Initialize this system.
*
* @param device_name - The name of the requested output device.
* @param in_params - Input parameters, see AudioOutParameter.
* @param handle - Unused.
* @param applet_resource_user_id - Unused.
* @return Result code.
*/
Result Initialize(std::string& device_name, const AudioOutParameter& in_params, u32 handle,
u64& applet_resource_user_id);
/**
* Start this system.
*
* @return Result code.
*/
Result Start();
/**
* Stop this system.
*
* @return Result code.
*/
Result Stop();
/**
* Finalize this system.
*/
void Finalize();
/**
* Start this system's device session.
*/
void StartSession();
/**
* Get this system's id.
*/
size_t GetSessionId() const;
/**
* Append a new buffer to the device.
*
* @param buffer - New buffer to append.
* @param tag - Unique tag of the buffer.
* @return True if the buffer was appended, otherwise false.
*/
bool AppendBuffer(const AudioOutBuffer& buffer, u64 tag);
/**
* Register all appended buffers.
*/
void RegisterBuffers();
/**
* Release all registered buffers.
*/
void ReleaseBuffers();
/**
* Get all released buffers.
*
* @param tags - Container to be filled with the released buffers' tags.
* @return The number of buffers released.
*/
u32 GetReleasedBuffers(std::span<u64> tags);
/**
* Flush all appended and registered buffers.
*
* @return True if buffers were successfully flushed, otherwise false.
*/
bool FlushAudioOutBuffers();
/**
* Get this system's current channel count.
*
* @return The channel count.
*/
u16 GetChannelCount() const;
/**
* Get this system's current sample rate.
*
* @return The sample rate.
*/
u32 GetSampleRate() const;
/**
* Get this system's current sample format.
*
* @return The sample format.
*/
SampleFormat GetSampleFormat() const;
/**
* Get this system's current state.
*
* @return The current state.
*/
State GetState();
/**
* Get this system's name.
*
* @return The system's name.
*/
std::string GetName() const;
/**
* Get this system's current volume.
*
* @return The system's current volume.
*/
f32 GetVolume() const;
/**
* Set this system's current volume.
*
* @param volume - The new volume.
*/
void SetVolume(f32 volume);
/**
* Does the system contain this buffer?
*
* @param tag - Unique tag to search for.
* @return True if the buffer is in the system, otherwise false.
*/
bool ContainsAudioBuffer(u64 tag);
/**
* Get the maximum number of usable buffers (default 32).
*
* @return The number of buffers.
*/
u32 GetBufferCount();
/**
* Get the total number of samples played by this system.
*
* @return The number of samples.
*/
u64 GetPlayedSampleCount() const;
private:
/// Core system
Core::System& system;
/// (Unused)
u32 handle{};
/// (Unused)
u64 applet_resource_user_id{};
/// Buffer event, signalled when a buffer is ready
Kernel::KEvent* buffer_event;
/// Session id of this system
size_t session_id{};
/// Device session for this system
std::unique_ptr<DeviceSession> session;
/// Audio buffers in use by this system
AudioBuffers<BufferCount> buffers{BufferCount};
/// Sample rate of this system
u32 sample_rate{};
/// Sample format of this system
SampleFormat sample_format{SampleFormat::PcmInt16};
/// Channel count of this system
u16 channel_count{};
/// State of this system
std::atomic<State> state{State::Stopped};
/// Name of this system
std::string name{};
/// Volume of this system
f32 volume{1.0f};
};
} // namespace AudioCore::AudioOut

View file

@@ -0,0 +1,118 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/adsp.h"
#include "audio_core/renderer/adsp/command_buffer.h"
#include "audio_core/sink/sink.h"
#include "common/logging/log.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/memory.h"
namespace AudioCore::AudioRenderer::ADSP {
ADSP::ADSP(Core::System& system_, Sink::Sink& sink_)
: system{system_}, memory{system.Memory()}, sink{sink_} {}
ADSP::~ADSP() {
ClearCommandBuffers();
}
State ADSP::GetState() const {
if (running) {
return State::Started;
}
return State::Stopped;
}
AudioRenderer_Mailbox* ADSP::GetRenderMailbox() {
return &render_mailbox;
}
void ADSP::ClearRemainCount(const u32 session_id) {
render_mailbox.ClearRemainCount(session_id);
}
u64 ADSP::GetSignalledTick() const {
return render_mailbox.GetSignalledTick();
}
u64 ADSP::GetTimeTaken() const {
return render_mailbox.GetRenderTimeTaken();
}
u64 ADSP::GetRenderTimeTaken(const u32 session_id) {
return render_mailbox.GetCommandBuffer(session_id).render_time_taken;
}
u32 ADSP::GetRemainCommandCount(const u32 session_id) const {
return render_mailbox.GetRemainCommandCount(session_id);
}
void ADSP::SendCommandBuffer(const u32 session_id, CommandBuffer& command_buffer) {
render_mailbox.SetCommandBuffer(session_id, command_buffer);
}
u64 ADSP::GetRenderingStartTick(const u32 session_id) {
return render_mailbox.GetSignalledTick() +
render_mailbox.GetCommandBuffer(session_id).render_time_taken;
}
bool ADSP::Start() {
if (running) {
return running;
}
running = true;
systems_active++;
audio_renderer = std::make_unique<AudioRenderer>(system);
audio_renderer->Start(&render_mailbox);
render_mailbox.HostSendMessage(RenderMessage::AudioRenderer_InitializeOK);
if (render_mailbox.HostWaitMessage() != RenderMessage::AudioRenderer_InitializeOK) {
LOG_ERROR(
Service_Audio,
"Host Audio Renderer -- Failed to receive initialize message response from ADSP!");
}
return running;
}
void ADSP::Stop() {
systems_active--;
if (running && systems_active == 0) {
{
std::scoped_lock l{mailbox_lock};
render_mailbox.HostSendMessage(RenderMessage::AudioRenderer_Shutdown);
if (render_mailbox.HostWaitMessage() != RenderMessage::AudioRenderer_Shutdown) {
LOG_ERROR(Service_Audio, "Host Audio Renderer -- Failed to receive shutdown "
"message response from ADSP!");
}
}
audio_renderer->Stop();
running = false;
}
}
void ADSP::Signal() {
const auto signalled_tick{system.CoreTiming().GetClockTicks()};
render_mailbox.SetSignalledTick(signalled_tick);
render_mailbox.HostSendMessage(RenderMessage::AudioRenderer_Render);
}
void ADSP::Wait() {
std::scoped_lock l{mailbox_lock};
auto response{render_mailbox.HostWaitMessage()};
if (response != RenderMessage::AudioRenderer_RenderResponse) {
LOG_ERROR(Service_Audio, "Invalid ADSP response message, expected 0x{:02X}, got 0x{:02X}",
static_cast<u32>(RenderMessage::AudioRenderer_RenderResponse),
static_cast<u32>(response));
}
ClearCommandBuffers();
}
void ADSP::ClearCommandBuffers() {
render_mailbox.ClearCommandBuffers();
}
} // namespace AudioCore::AudioRenderer::ADSP
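Start() and Stop() above keep a count of active systems so the renderer thread is launched once and only torn down when the last system stops. A simplified, hypothetical sketch of that reference counting; the real code also performs the mailbox init/shutdown handshakes shown above:

// Illustrative only; increments on every Start() for symmetry with Stop().
class RefCountedRunner {
public:
    void Start() {
        ++active_systems;
        if (!running) {
            running = true;
            // ... launch the worker thread and do the init handshake here ...
        }
    }

    void Stop() {
        if (active_systems > 0 && --active_systems == 0 && running) {
            // ... do the shutdown handshake and join the worker thread here ...
            running = false;
        }
    }

private:
    int active_systems{0};
    bool running{false};
};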

View file

@@ -0,0 +1,173 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <memory>
#include <mutex>
#include "audio_core/renderer/adsp/audio_renderer.h"
#include "common/common_types.h"
namespace Core {
namespace Memory {
class Memory;
}
class System;
} // namespace Core
namespace AudioCore {
namespace Sink {
class Sink;
}
namespace AudioRenderer::ADSP {
struct CommandBuffer;
enum class State {
Started,
Stopped,
};
/**
* Represents the ADSP embedded within the audio sysmodule.
* This is a 32-bit Linux4Tegra kernel from nVidia, which is launched with the sysmodule on boot.
*
* The kernel will run apps you program for it; Nintendo have the following:
*
* Gmix - Responsible for mixing final audio and sending it out to hardware. This is the last place all
* audio samples end up, and we skip it entirely, since we have very different backends and
* mixing is implicitly handled by the OS (but also due to lack of research/simplicity).
*
* AudioRenderer - Receives command lists generated by the audio render
* system, processes them, and sends the samples to Gmix.
*
* OpusDecoder - Contains libopus, and handles decoding Opus audio, sending it to Gmix.
* Not much research done here, TODO if needed.
*
* We only implement the AudioRenderer for now.
*
* Communication for the apps is done through mailboxes, and some shared memory.
*/
class ADSP {
public:
explicit ADSP(Core::System& system, Sink::Sink& sink);
~ADSP();
/**
* Start the ADSP.
*
* @return True if started or already running, otherwise false.
*/
bool Start();
/**
* Stop the ADSP. The renderer is only shut down once the last active system has stopped.
*/
void Stop();
/**
* Get the ADSP's state.
*
* @return Started or Stopped.
*/
State GetState() const;
/**
* Get the AudioRenderer mailbox to communicate with it.
*
* @return The AudioRenderer mailbox.
*/
AudioRenderer_Mailbox* GetRenderMailbox();
/**
* Get the tick the ADSP was signalled.
*
* @return The tick the ADSP was signalled.
*/
u64 GetSignalledTick() const;
/**
* Get the total time it took for the ADSP to run the last command lists (both command lists).
*
* @return The total time taken by the last command lists.
*/
u64 GetTimeTaken() const;
/**
* Get the last time a given command list took to run.
*
* @param session_id - The session id to check (0 or 1).
* @return The time it took.
*/
u64 GetRenderTimeTaken(u32 session_id);
/**
* Clear the remaining command count for a given session.
*
* @param session_id - The session id to check (0 or 1).
*/
void ClearRemainCount(u32 session_id);
/**
* Get the remaining number of commands left to process for a command list.
*
* @param session_id - The session id to check (0 or 1).
* @return The number of commands remaining.
*/
u32 GetRemainCommandCount(u32 session_id) const;
/**
* Get the last tick a command list started processing.
*
* @param session_id - The session id to check (0 or 1).
* @return The last tick the given command list started.
*/
u64 GetRenderingStartTick(u32 session_id);
/**
* Set a command buffer to be processed.
*
* @param session_id - The session id to check (0 or 1).
* @param command_buffer - The command buffer to process.
*/
void SendCommandBuffer(u32 session_id, CommandBuffer& command_buffer);
/**
* Clear the command buffers (does not clear the time taken or the remaining command count)
*/
void ClearCommandBuffers();
/**
* Signal the AudioRenderer to begin processing.
*/
void Signal();
/**
* Wait for the AudioRenderer to finish processing.
*/
void Wait();
private:
/// Core system
Core::System& system;
/// Core memory
Core::Memory::Memory& memory;
/// Number of systems active, used to prevent accidental shutdowns
u8 systems_active{0};
/// ADSP running state
std::atomic<bool> running{false};
/// Output sink used by the ADSP
Sink::Sink& sink;
/// AudioRenderer app
std::unique_ptr<AudioRenderer> audio_renderer{};
/// Communication for the AudioRenderer
AudioRenderer_Mailbox render_mailbox{};
/// Mailbox lock for the render mailbox
std::mutex mailbox_lock;
};
} // namespace AudioRenderer::ADSP
} // namespace AudioCore

View file

@@ -0,0 +1,230 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <array>
#include <chrono>
#include "audio_core/audio_core.h"
#include "audio_core/common/common.h"
#include "audio_core/renderer/adsp/audio_renderer.h"
#include "audio_core/sink/sink.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
MICROPROFILE_DEFINE(Audio_Renderer, "Audio", "DSP", MP_RGB(60, 19, 97));
namespace AudioCore::AudioRenderer::ADSP {
void AudioRenderer_Mailbox::HostSendMessage(RenderMessage message_) {
adsp_messages.enqueue(message_);
adsp_event.Set();
}
RenderMessage AudioRenderer_Mailbox::HostWaitMessage() {
host_event.Wait();
RenderMessage msg{RenderMessage::Invalid};
if (!host_messages.try_dequeue(msg)) {
LOG_ERROR(Service_Audio, "Failed to dequeue host message!");
}
return msg;
}
void AudioRenderer_Mailbox::ADSPSendMessage(const RenderMessage message_) {
host_messages.enqueue(message_);
host_event.Set();
}
RenderMessage AudioRenderer_Mailbox::ADSPWaitMessage() {
adsp_event.Wait();
RenderMessage msg{RenderMessage::Invalid};
if (!adsp_messages.try_dequeue(msg)) {
LOG_ERROR(Service_Audio, "Failed to dequeue ADSP message!");
}
return msg;
}
CommandBuffer& AudioRenderer_Mailbox::GetCommandBuffer(const s32 session_id) {
return command_buffers[session_id];
}
void AudioRenderer_Mailbox::SetCommandBuffer(const u32 session_id, CommandBuffer& buffer) {
command_buffers[session_id] = buffer;
}
u64 AudioRenderer_Mailbox::GetRenderTimeTaken() const {
return command_buffers[0].render_time_taken + command_buffers[1].render_time_taken;
}
u64 AudioRenderer_Mailbox::GetSignalledTick() const {
return signalled_tick;
}
void AudioRenderer_Mailbox::SetSignalledTick(const u64 tick) {
signalled_tick = tick;
}
void AudioRenderer_Mailbox::ClearRemainCount(const u32 session_id) {
command_buffers[session_id].remaining_command_count = 0;
}
u32 AudioRenderer_Mailbox::GetRemainCommandCount(const u32 session_id) const {
return command_buffers[session_id].remaining_command_count;
}
void AudioRenderer_Mailbox::ClearCommandBuffers() {
command_buffers[0].buffer = 0;
command_buffers[0].size = 0;
command_buffers[0].reset_buffers = false;
command_buffers[1].buffer = 0;
command_buffers[1].size = 0;
command_buffers[1].reset_buffers = false;
}
AudioRenderer::AudioRenderer(Core::System& system_)
: system{system_}, sink{system.AudioCore().GetOutputSink()} {
CreateSinkStreams();
}
AudioRenderer::~AudioRenderer() {
Stop();
for (auto& stream : streams) {
if (stream) {
sink.CloseStream(stream);
}
stream = nullptr;
}
}
void AudioRenderer::Start(AudioRenderer_Mailbox* mailbox_) {
if (running) {
return;
}
mailbox = mailbox_;
thread = std::thread(&AudioRenderer::ThreadFunc, this);
for (auto& stream : streams) {
stream->Start();
}
running = true;
}
void AudioRenderer::Stop() {
if (!running) {
return;
}
for (auto& stream : streams) {
stream->Stop();
}
thread.join();
running = false;
}
void AudioRenderer::CreateSinkStreams() {
u32 channels{sink.GetDeviceChannels()};
for (u32 i = 0; i < MaxRendererSessions; i++) {
std::string name{fmt::format("ADSP_RenderStream-{}", i)};
streams[i] = sink.AcquireSinkStream(system, channels, name,
::AudioCore::Sink::StreamType::Render, &render_event);
streams[i]->SetSystemChannels(streams[i]->GetDeviceChannels());
}
}
void AudioRenderer::ThreadFunc() {
constexpr char name[]{"yuzu:AudioRenderer"};
MicroProfileOnThreadCreate(name);
Common::SetCurrentThreadName(name);
Common::SetCurrentThreadPriority(Common::ThreadPriority::Critical);
if (mailbox->ADSPWaitMessage() != RenderMessage::AudioRenderer_InitializeOK) {
LOG_ERROR(Service_Audio,
"ADSP Audio Renderer -- Failed to receive initialize message from host!");
return;
}
mailbox->ADSPSendMessage(RenderMessage::AudioRenderer_InitializeOK);
constexpr u64 max_process_time{2'304'000ULL};
while (true) {
auto message{mailbox->ADSPWaitMessage()};
switch (message) {
case RenderMessage::AudioRenderer_Shutdown:
mailbox->ADSPSendMessage(RenderMessage::AudioRenderer_Shutdown);
return;
case RenderMessage::AudioRenderer_Render: {
std::array<bool, MaxRendererSessions> buffers_reset{};
std::array<u64, MaxRendererSessions> render_times_taken{};
const auto start_time{system.CoreTiming().GetClockTicks()};
for (u32 index = 0; index < 2; index++) {
auto& command_buffer{mailbox->GetCommandBuffer(index)};
auto& command_list_processor{command_list_processors[index]};
// Check this buffer is valid, as it may not be used.
if (command_buffer.buffer != 0) {
// If there are no remaining commands (from the previous list),
// this is a new command list, initialize it.
if (command_buffer.remaining_command_count == 0) {
command_list_processor.Initialize(system, command_buffer.buffer,
command_buffer.size, streams[index]);
}
if (command_buffer.reset_buffers && !buffers_reset[index]) {
streams[index]->ClearQueue();
buffers_reset[index] = true;
}
u64 max_time{max_process_time};
if (index == 1 && command_buffer.applet_resource_user_id ==
mailbox->GetCommandBuffer(0).applet_resource_user_id) {
max_time = max_process_time -
Core::Timing::CyclesToNs(render_times_taken[0]).count();
if (render_times_taken[0] > max_process_time) {
max_time = 0;
}
}
max_time = std::min(command_buffer.time_limit, max_time);
command_list_processor.SetProcessTimeMax(max_time);
// Process the command list
{
MICROPROFILE_SCOPE(Audio_Renderer);
render_times_taken[index] =
command_list_processor.Process(index) - start_time;
}
// If the stream queue is building up too much, wait for a signal
// from the backend that a buffer was consumed.
// In practice this will wait longer than 1 buffer due to timing.
auto stream{command_list_processor.GetOutputSinkStream()};
if (stream->GetQueueSize() >= 4) {
render_event.WaitFor(std::chrono::milliseconds(5));
}
const auto end_time{system.CoreTiming().GetClockTicks()};
command_buffer.remaining_command_count =
command_list_processor.GetRemainingCommandCount();
command_buffer.render_time_taken = end_time - start_time;
}
}
mailbox->ADSPSendMessage(RenderMessage::AudioRenderer_RenderResponse);
} break;
default:
LOG_WARNING(Service_Audio,
"ADSP AudioRenderer received an invalid message, msg={:02X}!",
static_cast<u32>(message));
break;
}
}
}
} // namespace AudioCore::AudioRenderer::ADSP
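When both sessions share an applet resource id, ThreadFunc above gives the second command list whatever remains of the shared processing budget after the first, clamped at zero and further capped by the command buffer's own time limit. A sketch of that budget calculation, with all values assumed to be in the same time unit:

#include <algorithm>
#include <cstdint>

constexpr std::uint64_t SessionTimeBudget(std::uint64_t max_process_time,
                                          std::uint64_t first_session_time,
                                          std::uint64_t time_limit, bool shares_applet_resource) {
    std::uint64_t budget{max_process_time};
    if (shares_applet_resource) {
        // Clamp at zero if the first session already used the whole budget.
        budget = first_session_time > max_process_time ? 0 : max_process_time - first_session_time;
    }
    return std::min(time_limit, budget);
}

static_assert(SessionTimeBudget(100, 30, 1000, true) == 70);
static_assert(SessionTimeBudget(100, 120, 1000, true) == 0);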

View file

@@ -0,0 +1,202 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <memory>
#include <thread>
#include "audio_core/renderer/adsp/command_buffer.h"
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "common/common_types.h"
#include "common/reader_writer_queue.h"
#include "common/thread.h"
namespace Core {
class System;
}
namespace AudioCore {
namespace Sink {
class Sink;
}
namespace AudioRenderer::ADSP {
enum class RenderMessage {
/* 0x00 */ Invalid,
/* 0x01 */ AudioRenderer_MapUnmap_Map,
/* 0x02 */ AudioRenderer_MapUnmap_MapResponse,
/* 0x03 */ AudioRenderer_MapUnmap_Unmap,
/* 0x04 */ AudioRenderer_MapUnmap_UnmapResponse,
/* 0x05 */ AudioRenderer_MapUnmap_InvalidateCache,
/* 0x06 */ AudioRenderer_MapUnmap_InvalidateCacheResponse,
/* 0x07 */ AudioRenderer_MapUnmap_Shutdown,
/* 0x08 */ AudioRenderer_MapUnmap_ShutdownResponse,
/* 0x16 */ AudioRenderer_InitializeOK = 0x16,
/* 0x20 */ AudioRenderer_RenderResponse = 0x20,
/* 0x2A */ AudioRenderer_Render = 0x2A,
/* 0x34 */ AudioRenderer_Shutdown = 0x34,
};
/**
* A mailbox for the AudioRenderer, allowing communication between the host and the AudioRenderer
* running on the ADSP.
*/
class AudioRenderer_Mailbox {
public:
/**
* Send a message from the host to the AudioRenderer.
*
* @param message - The message to send to the AudioRenderer.
*/
void HostSendMessage(RenderMessage message);
/**
* Host wait for a message from the AudioRenderer.
*
* @return The message returned from the AudioRenderer.
*/
RenderMessage HostWaitMessage();
/**
* Send a message from the AudioRenderer to the host.
*
* @param message - The message to send to the host.
*/
void ADSPSendMessage(RenderMessage message);
/**
* AudioRenderer wait for a message from the host.
*
* @return The message sent from the host.
*/
RenderMessage ADSPWaitMessage();
/**
* Get the command buffer with the given session id (0 or 1).
*
* @param session_id - The session id to get (0 or 1).
* @return The command buffer.
*/
CommandBuffer& GetCommandBuffer(s32 session_id);
/**
* Set the command buffer with the given session id (0 or 1).
*
* @param session_id - The session id to get (0 or 1).
* @param buffer - The command buffer to set.
*/
void SetCommandBuffer(u32 session_id, CommandBuffer& buffer);
/**
* Get the total render time taken for the last command lists sent.
*
* @return Total render time taken for the last command lists.
*/
u64 GetRenderTimeTaken() const;
/**
* Get the tick the AudioRenderer was signalled.
*
* @return The tick the AudioRenderer was signalled.
*/
u64 GetSignalledTick() const;
/**
* Set the tick the AudioRenderer was signalled.
*
* @param tick - The tick the AudioRenderer was signalled.
*/
void SetSignalledTick(u64 tick);
/**
* Clear the remaining command count.
*
* @param session_id - Index for which command list to clear (0 or 1).
*/
void ClearRemainCount(u32 session_id);
/**
* Get the remaining command count for a given command list.
*
* @param session_id - Index for which command list to query (0 or 1).
* @return The remaining command count.
*/
u32 GetRemainCommandCount(u32 session_id) const;
/**
* Clear the command buffers (does not clear the time taken or the remaining command count).
*/
void ClearCommandBuffers();
private:
/// Host signalling event
Common::Event host_event{};
/// AudioRenderer signalling event
Common::Event adsp_event{};
/// Host message queue
Common::ReaderWriterQueue<RenderMessage> host_messages{};
/// AudioRenderer message queue
Common::ReaderWriterQueue<RenderMessage> adsp_messages{};
/// Command buffers
std::array<CommandBuffer, MaxRendererSessions> command_buffers{};
/// Tick the AudioRenderer was signalled
u64 signalled_tick{};
};
/**
* The AudioRenderer application running on the ADSP.
*/
class AudioRenderer {
public:
explicit AudioRenderer(Core::System& system);
~AudioRenderer();
/**
* Start the AudioRenderer.
*
* @param mailbox - The mailbox to use for this session.
*/
void Start(AudioRenderer_Mailbox* mailbox);
/**
* Stop the AudioRenderer.
*/
void Stop();
private:
/**
* Main AudioRenderer thread, responsible for processing the command lists.
*/
void ThreadFunc();
/**
* Creates the streams which will receive the processed samples.
*/
void CreateSinkStreams();
/// Core system
Core::System& system;
/// Main thread
std::thread thread{};
/// The current state
std::atomic<bool> running{};
/// The active mailbox
AudioRenderer_Mailbox* mailbox{};
/// The command lists to process
std::array<CommandListProcessor, MaxRendererSessions> command_list_processors{};
/// The output sink the AudioRenderer will use
Sink::Sink& sink;
/// The streams which will receive the processed samples
std::array<Sink::SinkStream*, MaxRendererSessions> streams;
/// An event signalled from the backend when a buffer is consumed, used for timing.
Common::Event render_event{};
};
} // namespace AudioRenderer::ADSP
} // namespace AudioCore
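The mailbox above pairs one queue with one signalling event per direction, so the host and the renderer thread never contend on a shared primitive. A self-contained sketch of that shape using only the standard library, with mutex/condition_variable standing in for Common::Event and Common::ReaderWriterQueue:

#include <condition_variable>
#include <mutex>
#include <queue>

enum class Msg { Invalid, InitializeOK, Render, RenderResponse, Shutdown };

// One direction of the mailbox: a queue plus its own wake-up signal.
class Channel {
public:
    void Send(Msg m) {
        {
            std::scoped_lock l{lock};
            messages.push(m);
        }
        cv.notify_one();
    }
    Msg Wait() {
        std::unique_lock l{lock};
        cv.wait(l, [this] { return !messages.empty(); });
        const Msg m{messages.front()};
        messages.pop();
        return m;
    }

private:
    std::mutex lock;
    std::condition_variable cv;
    std::queue<Msg> messages;
};

// Two independent directions, mirroring the Host*/ADSP* pairs above.
struct Mailbox {
    Channel to_renderer; // HostSendMessage() enqueues, ADSPWaitMessage() dequeues
    Channel to_host;     // ADSPSendMessage() enqueues, HostWaitMessage() dequeues
};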

View file

@@ -0,0 +1,21 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "audio_core/common/common.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer::ADSP {
struct CommandBuffer {
CpuAddr buffer;
u64 size;
u64 time_limit;
u32 remaining_command_count;
bool reset_buffers;
u64 applet_resource_user_id;
u64 render_time_taken;
};
} // namespace AudioCore::AudioRenderer::ADSP

View file

@@ -0,0 +1,109 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string>
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/command_list_header.h"
#include "audio_core/renderer/command/commands.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/core_timing_util.h"
#include "core/memory.h"
namespace AudioCore::AudioRenderer::ADSP {
void CommandListProcessor::Initialize(Core::System& system_, CpuAddr buffer, u64 size,
Sink::SinkStream* stream_) {
system = &system_;
memory = &system->Memory();
stream = stream_;
header = reinterpret_cast<CommandListHeader*>(buffer);
commands = reinterpret_cast<u8*>(buffer + sizeof(CommandListHeader));
commands_buffer_size = size;
command_count = header->command_count;
sample_count = header->sample_count;
target_sample_rate = header->sample_rate;
mix_buffers = header->samples_buffer;
buffer_count = header->buffer_count;
processed_command_count = 0;
}
void CommandListProcessor::SetProcessTimeMax(const u64 time) {
max_process_time = time;
}
u32 CommandListProcessor::GetRemainingCommandCount() const {
return command_count - processed_command_count;
}
void CommandListProcessor::SetBuffer(const CpuAddr buffer, const u64 size) {
commands = reinterpret_cast<u8*>(buffer + sizeof(CommandListHeader));
commands_buffer_size = size;
}
Sink::SinkStream* CommandListProcessor::GetOutputSinkStream() const {
return stream;
}
u64 CommandListProcessor::Process(u32 session_id) {
const auto start_time_{system->CoreTiming().GetClockTicks()};
const auto command_base{CpuAddr(commands)};
if (processed_command_count > 0) {
current_processing_time += start_time_ - end_time;
} else {
start_time = start_time_;
current_processing_time = 0;
}
std::string dump{fmt::format("\nSession {}\n", session_id)};
for (u32 index = 0; index < command_count; index++) {
auto& command{*reinterpret_cast<ICommand*>(commands)};
if (command.magic != 0xCAFEBABE) {
LOG_ERROR(Service_Audio, "Command has invalid magic! Expected 0xCAFEBABE, got {:08X}",
command.magic);
return system->CoreTiming().GetClockTicks() - start_time_;
}
auto current_offset{CpuAddr(commands) - command_base};
if (current_offset + command.size > commands_buffer_size) {
LOG_ERROR(Service_Audio,
"Command exceeded command buffer, buffer size {:08X}, command ends at {:08X}",
commands_buffer_size,
CpuAddr(commands) + command.size - sizeof(CommandListHeader));
return system->CoreTiming().GetClockTicks() - start_time_;
}
if (Settings::values.dump_audio_commands) {
command.Dump(*this, dump);
}
if (!command.Verify(*this)) {
break;
}
if (command.enabled) {
command.Process(*this);
} else {
dump += fmt::format("\tDisabled!\n");
}
processed_command_count++;
commands += command.size;
}
if (Settings::values.dump_audio_commands && dump != last_dump) {
LOG_WARNING(Service_Audio, "{}", dump);
last_dump = dump;
}
end_time = system->CoreTiming().GetClockTicks();
return end_time - start_time_;
}
} // namespace AudioCore::AudioRenderer::ADSP
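Process() above walks the command buffer one command at a time, rejecting a command whose magic is wrong or whose declared size would run past the end of the buffer. A self-contained sketch of that walk, assuming a hypothetical fixed header of a magic value followed by a total size (the real ICommand layout is defined elsewhere):

#include <cstddef>
#include <cstdint>
#include <cstring>

struct CmdHeader {
    std::uint32_t magic;
    std::uint32_t size; // total size of the command, header included
};

inline std::uint32_t WalkCommands(const std::uint8_t* buffer, std::size_t buffer_size,
                                  std::uint32_t command_count) {
    std::uint32_t processed{0};
    std::size_t offset{0};
    for (std::uint32_t i = 0; i < command_count; i++) {
        if (offset + sizeof(CmdHeader) > buffer_size) {
            break;
        }
        CmdHeader header{};
        std::memcpy(&header, buffer + offset, sizeof(header));
        if (header.magic != 0xCAFEBABE || header.size < sizeof(CmdHeader) ||
            offset + header.size > buffer_size) {
            break; // invalid magic, nonsensical size, or command overruns the buffer
        }
        // ... dispatch/process the command here ...
        offset += header.size;
        processed++;
    }
    return processed;
}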

View file

@@ -0,0 +1,118 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/common/common.h"
#include "common/common_types.h"
namespace Core {
namespace Memory {
class Memory;
}
class System;
} // namespace Core
namespace AudioCore {
namespace Sink {
class SinkStream;
}
namespace AudioRenderer {
struct CommandListHeader;
namespace ADSP {
/**
* A processor for command lists given to the AudioRenderer.
*/
class CommandListProcessor {
public:
/**
* Initialize the processor.
*
* @param system_ - The core system.
* @param buffer - The command buffer to process.
* @param size - The size of the buffer.
* @param stream_ - The stream to be used for sending the samples.
*/
void Initialize(Core::System& system, CpuAddr buffer, u64 size, Sink::SinkStream* stream);
/**
* Set the maximum processing time for this command list.
*
* @param time - The maximum process time.
*/
void SetProcessTimeMax(u64 time);
/**
* Get the remaining command count for this list.
*
* @return The remaining command count.
*/
u32 GetRemainingCommandCount() const;
/**
* Set the command buffer.
*
* @param buffer - The buffer to use.
* @param size - The size of the buffer.
*/
void SetBuffer(CpuAddr buffer, u64 size);
/**
* Get the stream for this command list.
*
* @return The stream associated with this command list.
*/
Sink::SinkStream* GetOutputSinkStream() const;
/**
* Process the command list.
*
* @param session_id - Session id of the command list being processed.
* @return The time taken to process.
*/
u64 Process(u32 session_id);
/// Core system
Core::System* system{};
/// Core memory
Core::Memory::Memory* memory{};
/// Stream for the processed samples
Sink::SinkStream* stream{};
/// Header info for this command list
CommandListHeader* header{};
/// The command buffer
u8* commands{};
/// The command buffer size
u64 commands_buffer_size{};
/// The maximum processing time allotted
u64 max_process_time{};
/// The number of commands in the buffer
u32 command_count{};
/// The target sample count for output
u32 sample_count{};
/// The target sample rate for output
u32 target_sample_rate{};
/// The mixing buffers used by the commands
std::span<s32> mix_buffers{};
/// The number of mix buffers
u32 buffer_count{};
/// The number of processed commands so far
u32 processed_command_count{};
/// The processing start time of this list
u64 start_time{};
/// The current processing time for this list
u64 current_processing_time{};
/// The end processing time for this list
u64 end_time{};
/// Last command list string generated, used for dumping audio commands to console
std::string last_dump{};
};
} // namespace ADSP
} // namespace AudioRenderer
} // namespace AudioCore
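
A minimal sketch of how a caller might drive the interface above, assuming the surrounding ADSP code already owns the system, command buffer address, sink stream and tick budget; the wrapper function itself is hypothetical.

// Hypothetical wrapper, not part of this commit; the argument types are the ones
// the class above expects.
u64 RunCommandList(Core::System& system, CpuAddr command_buffer, u64 buffer_size,
                   AudioCore::Sink::SinkStream* stream, u64 max_ticks, u32 session_id) {
    AudioCore::AudioRenderer::ADSP::CommandListProcessor processor{};
    processor.Initialize(system, command_buffer, buffer_size, stream);
    processor.SetProcessTimeMax(max_ticks);
    const u64 ticks_taken = processor.Process(session_id);
    if (processor.GetRemainingCommandCount() > 0) {
        // Process() bailed out early: bad magic, a command past the buffer end,
        // or a failed Verify().
    }
    return ticks_taken;
}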

View file

@ -0,0 +1,52 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_core.h"
#include "audio_core/common/feature_support.h"
#include "audio_core/renderer/audio_device.h"
#include "audio_core/sink/sink.h"
#include "core/core.h"
namespace AudioCore::AudioRenderer {
AudioDevice::AudioDevice(Core::System& system, const u64 applet_resource_user_id_,
const u32 revision)
: output_sink{system.AudioCore().GetOutputSink()},
applet_resource_user_id{applet_resource_user_id_}, user_revision{revision} {}
u32 AudioDevice::ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer,
const size_t max_count) {
std::span<AudioDeviceName> names{};
if (CheckFeatureSupported(SupportTags::AudioUsbDeviceOutput, user_revision)) {
names = usb_device_names;
} else {
names = device_names;
}
u32 out_count{static_cast<u32>(std::min(max_count, names.size()))};
for (u32 i = 0; i < out_count; i++) {
out_buffer.push_back(names[i]);
}
return out_count;
}
u32 AudioDevice::ListAudioOutputDeviceName(std::vector<AudioDeviceName>& out_buffer,
const size_t max_count) {
u32 out_count{static_cast<u32>(std::min(max_count, output_device_names.size()))};
for (u32 i = 0; i < out_count; i++) {
out_buffer.push_back(output_device_names[i]);
}
return out_count;
}
void AudioDevice::SetDeviceVolumes(const f32 volume) {
output_sink.SetDeviceVolume(volume);
}
f32 AudioDevice::GetDeviceVolume([[maybe_unused]] std::string_view name) {
return output_sink.GetDeviceVolume();
}
} // namespace AudioCore::AudioRenderer
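
A short usage sketch for AudioDevice as implemented above: list the available names and set the backend volume. The enclosing function and the zero placeholders for the applet resource id and revision are illustrative only.

// Illustrative only; assumes the audio_device.h declarations that follow.
void ListDevicesAndMute(Core::System& system) {
    using AudioCore::AudioRenderer::AudioDevice;
    AudioDevice device{system, /*applet_resource_user_id=*/0, /*revision=*/0};
    std::vector<AudioDevice::AudioDeviceName> names{};
    const u32 count = device.ListAudioDeviceName(names, 3);
    for (u32 i = 0; i < count; i++) {
        LOG_INFO(Service_Audio, "Audio device {}: {}", i, names[i].name.data());
    }
    device.SetDeviceVolumes(0.0f); // mute every stream on the backend sink
}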

View file

@ -0,0 +1,88 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/audio_render_manager.h"
namespace Core {
class System;
}
namespace AudioCore {
namespace Sink {
class Sink;
}
namespace AudioRenderer {
/**
* An interface to an output audio device available to the Switch.
*/
class AudioDevice {
public:
struct AudioDeviceName {
std::array<char, 0x100> name;
AudioDeviceName(const char* name_) {
std::strncpy(name.data(), name_, name.size());
}
};
std::array<AudioDeviceName, 4> usb_device_names{"AudioStereoJackOutput",
"AudioBuiltInSpeakerOutput", "AudioTvOutput",
"AudioUsbDeviceOutput"};
std::array<AudioDeviceName, 3> device_names{"AudioStereoJackOutput",
"AudioBuiltInSpeakerOutput", "AudioTvOutput"};
std::array<AudioDeviceName, 3> output_device_names{"AudioBuiltInSpeakerOutput", "AudioTvOutput",
"AudioExternalOutput"};
explicit AudioDevice(Core::System& system, u64 applet_resource_user_id, u32 revision);
/**
* Get a list of the available output devices.
*
* @param out_buffer - Output buffer to write the available device names.
* @param max_count - Maximum number of devices to write (count of out_buffer).
* @return Number of device names written.
*/
u32 ListAudioDeviceName(std::vector<AudioDeviceName>& out_buffer, size_t max_count);
/**
* Get a list of the available output devices.
* Similar to ListAudioDeviceName, but returns the output device list; the exact difference is unclear.
*
* @param out_buffer - Output buffer to write the available device names.
* @param max_count - Maximum number of devices to write (count of out_buffer).
* @return Number of device names written.
*/
u32 ListAudioOutputDeviceName(std::vector<AudioDeviceName>& out_buffer, size_t max_count);
/**
* Set the volume of all streams in the backend sink.
*
* @param volume - Volume to set.
*/
void SetDeviceVolumes(f32 volume);
/**
* Get the volume for a given device name.
* Note: This is not fully implemented, we only assume 1 device for all streams.
*
* @param name - Name of the device to check. Unused.
* @return Volume of the device.
*/
f32 GetDeviceVolume(std::string_view name);
private:
/// Backend output sink for the device
Sink::Sink& output_sink;
/// Resource id this device is used for
const u64 applet_resource_user_id;
/// User audio renderer revision
const u32 user_revision;
};
} // namespace AudioRenderer
} // namespace AudioCore
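
The device names above are fixed 0x100-byte char arrays filled with strncpy, so a reader should not assume null termination in the general case. A self-contained helper for converting such a field back into a bounded std::string, offered only as a hypothetical convenience:

#include <algorithm>
#include <array>
#include <string>

// Hypothetical helper, not part of this commit: stops at the first NUL or at the
// end of the fixed-width field, whichever comes first.
std::string FixedNameToString(const std::array<char, 0x100>& field) {
    const auto terminator = std::find(field.begin(), field.end(), '\0');
    return std::string(field.begin(), terminator);
}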

View file

@ -0,0 +1,67 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/audio_render_manager.h"
#include "audio_core/common/audio_renderer_parameter.h"
#include "audio_core/renderer/audio_renderer.h"
#include "audio_core/renderer/system_manager.h"
#include "core/core.h"
#include "core/hle/kernel/k_transfer_memory.h"
#include "core/hle/service/audio/errors.h"
namespace AudioCore::AudioRenderer {
Renderer::Renderer(Core::System& system_, Manager& manager_, Kernel::KEvent* rendered_event)
: core{system_}, manager{manager_}, system{system_, rendered_event} {}
Result Renderer::Initialize(const AudioRendererParameterInternal& params,
Kernel::KTransferMemory* transfer_memory,
const u64 transfer_memory_size, const u32 process_handle,
const u64 applet_resource_user_id, const s32 session_id) {
if (params.execution_mode == ExecutionMode::Auto) {
if (!manager.AddSystem(system)) {
LOG_ERROR(Service_Audio,
"Both Audio Render sessions are in use, cannot create any more");
return Service::Audio::ERR_MAXIMUM_SESSIONS_REACHED;
}
system_registered = true;
}
initialized = true;
system.Initialize(params, transfer_memory, transfer_memory_size, process_handle,
applet_resource_user_id, session_id);
return ResultSuccess;
}
void Renderer::Finalize() {
auto session_id{system.GetSessionId()};
system.Finalize();
if (system_registered) {
manager.RemoveSystem(system);
system_registered = false;
}
manager.ReleaseSessionId(session_id);
}
System& Renderer::GetSystem() {
return system;
}
void Renderer::Start() {
system.Start();
}
void Renderer::Stop() {
system.Stop();
}
Result Renderer::RequestUpdate(std::span<const u8> input, std::span<u8> performance,
std::span<u8> output) {
return system.Update(input, performance, output);
}
} // namespace AudioCore::AudioRenderer
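
A hedged lifecycle sketch based on the methods defined above: initialize, start, service one RequestUpdate, then stop and finalize. The wrapper and its buffer arguments stand in for the audren service plumbing and are not part of this commit.

// Hypothetical wrapper showing the call order only; buffers and parameters are
// supplied by the service side.
Result RunOneUpdate(AudioCore::AudioRenderer::Renderer& renderer,
                    const AudioCore::AudioRendererParameterInternal& params,
                    Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
                    u32 process_handle, u64 applet_resource_user_id, s32 session_id,
                    std::span<const u8> update_input, std::span<u8> performance_output,
                    std::span<u8> update_output) {
    const Result init_result{renderer.Initialize(params, transfer_memory, transfer_memory_size,
                                                 process_handle, applet_resource_user_id,
                                                 session_id)};
    if (init_result.IsError()) {
        return init_result;
    }
    renderer.Start();
    const Result update_result{
        renderer.RequestUpdate(update_input, performance_output, update_output)};
    renderer.Stop();
    renderer.Finalize();
    return update_result;
}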

View file

@ -0,0 +1,97 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/renderer/system.h"
#include "core/hle/service/audio/errors.h"
namespace Core {
class System;
}
namespace Kernel {
class KTransferMemory;
}
namespace AudioCore {
struct AudioRendererParameterInternal;
namespace AudioRenderer {
class Manager;
/**
* Audio Renderer, wraps the main audio system and is mainly responsible for handling service calls.
*/
class Renderer {
public:
explicit Renderer(Core::System& system, Manager& manager, Kernel::KEvent* rendered_event);
/**
* Initialize the renderer.
* Registers the system with the AudioRenderer::Manager, allocates workbuffers and initializes
* everything to a default state.
*
* @param params - Input parameters to initialize the system with.
* @param transfer_memory - Game-supplied memory for all workbuffers. Unused.
* @param transfer_memory_size - Size of the transfer memory. Unused.
* @param process_handle - Process handle, also used for memory. Unused.
* @param applet_resource_user_id - Applet id for this renderer. Unused.
* @param session_id - Session id of this renderer.
* @return Result code.
*/
Result Initialize(const AudioRendererParameterInternal& params,
Kernel::KTransferMemory* transfer_memory, u64 transfer_memory_size,
u32 process_handle, u64 applet_resource_user_id, s32 session_id);
/**
* Finalize the renderer for shutdown.
*/
void Finalize();
/**
* Get the renderer's system.
*
* @return Reference to the system.
*/
System& GetSystem();
/**
* Start the renderer.
*/
void Start();
/**
* Stop the renderer.
*/
void Stop();
/**
* Update the audio renderer with new information.
* Called via RequestUpdate from the AudRen:U service.
*
* @param input - Input buffer containing the new data.
* @param performance - Optional performance buffer for outputting performance metrics.
* @param output - Output data from the renderer.
* @return Result code.
*/
Result RequestUpdate(std::span<const u8> input, std::span<u8> performance,
std::span<u8> output);
private:
/// System core
Core::System& core;
/// Manager this renderer is registered with
Manager& manager;
/// Is the audio renderer initialized?
bool initialized{};
/// Is the system registered with the manager?
bool system_registered{};
/// Audio render system, main driver of audio rendering
System system;
};
} // namespace AudioRenderer
} // namespace AudioCore

View file

@ -0,0 +1,174 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/common/feature_support.h"
#include "audio_core/renderer/behavior/behavior_info.h"
namespace AudioCore::AudioRenderer {
BehaviorInfo::BehaviorInfo() : process_revision{CurrentRevision} {}
u32 BehaviorInfo::GetProcessRevisionNum() const {
return process_revision;
}
u32 BehaviorInfo::GetProcessRevision() const {
return Common::MakeMagic('R', 'E', 'V',
static_cast<char>(static_cast<u8>('0') + process_revision));
}
u32 BehaviorInfo::GetUserRevisionNum() const {
return user_revision;
}
u32 BehaviorInfo::GetUserRevision() const {
return Common::MakeMagic('R', 'E', 'V',
static_cast<char>(static_cast<u8>('0') + user_revision));
}
void BehaviorInfo::SetUserLibRevision(const u32 user_revision_) {
user_revision = GetRevisionNum(user_revision_);
}
void BehaviorInfo::ClearError() {
error_count = 0;
}
void BehaviorInfo::AppendError(ErrorInfo& error) {
LOG_ERROR(Service_Audio, "Error during RequestUpdate, reporting code {:04X} address {:08X}",
error.error_code.raw, error.address);
if (error_count < MaxErrors) {
errors[error_count++] = error;
}
}
void BehaviorInfo::CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count) {
auto error_count_{std::min(error_count, MaxErrors)};
std::memset(out_errors.data(), 0, MaxErrors * sizeof(ErrorInfo));
for (size_t i = 0; i < error_count_; i++) {
out_errors[i] = errors[i];
}
out_count = error_count_;
}
void BehaviorInfo::UpdateFlags(const Flags flags_) {
flags = flags_;
}
bool BehaviorInfo::IsMemoryForceMappingEnabled() const {
return flags.IsMemoryForceMappingEnabled;
}
bool BehaviorInfo::IsAdpcmLoopContextBugFixed() const {
return CheckFeatureSupported(SupportTags::AdpcmLoopContextBugFix, user_revision);
}
bool BehaviorInfo::IsSplitterSupported() const {
return CheckFeatureSupported(SupportTags::Splitter, user_revision);
}
bool BehaviorInfo::IsSplitterBugFixed() const {
return CheckFeatureSupported(SupportTags::SplitterBugFix, user_revision);
}
bool BehaviorInfo::IsEffectInfoVersion2Supported() const {
return CheckFeatureSupported(SupportTags::EffectInfoVer2, user_revision);
}
bool BehaviorInfo::IsVariadicCommandBufferSizeSupported() const {
return CheckFeatureSupported(SupportTags::AudioRendererVariadicCommandBufferSize,
user_revision);
}
bool BehaviorInfo::IsWaveBufferVer2Supported() const {
return CheckFeatureSupported(SupportTags::WaveBufferVer2, user_revision);
}
bool BehaviorInfo::IsLongSizePreDelaySupported() const {
return CheckFeatureSupported(SupportTags::LongSizePreDelay, user_revision);
}
bool BehaviorInfo::IsCommandProcessingTimeEstimatorVersion2Supported() const {
return CheckFeatureSupported(SupportTags::CommandProcessingTimeEstimatorVersion2,
user_revision);
}
bool BehaviorInfo::IsCommandProcessingTimeEstimatorVersion3Supported() const {
return CheckFeatureSupported(SupportTags::CommandProcessingTimeEstimatorVersion3,
user_revision);
}
bool BehaviorInfo::IsCommandProcessingTimeEstimatorVersion4Supported() const {
return CheckFeatureSupported(SupportTags::CommandProcessingTimeEstimatorVersion4,
user_revision);
}
bool BehaviorInfo::IsAudioRenererProcessingTimeLimit70PercentSupported() const {
return CheckFeatureSupported(SupportTags::AudioRendererProcessingTimeLimit70Percent,
user_revision);
}
bool BehaviorInfo::IsAudioRenererProcessingTimeLimit75PercentSupported() const {
return CheckFeatureSupported(SupportTags::AudioRendererProcessingTimeLimit75Percent,
user_revision);
}
bool BehaviorInfo::IsAudioRenererProcessingTimeLimit80PercentSupported() const {
return CheckFeatureSupported(SupportTags::AudioRendererProcessingTimeLimit80Percent,
user_revision);
}
bool BehaviorInfo::IsFlushVoiceWaveBuffersSupported() const {
return CheckFeatureSupported(SupportTags::FlushVoiceWaveBuffers, user_revision);
}
bool BehaviorInfo::IsElapsedFrameCountSupported() const {
return CheckFeatureSupported(SupportTags::ElapsedFrameCount, user_revision);
}
bool BehaviorInfo::IsPerformanceMetricsDataFormatVersion2Supported() const {
return CheckFeatureSupported(SupportTags::PerformanceMetricsDataFormatVersion2, user_revision);
}
size_t BehaviorInfo::GetPerformanceMetricsDataFormat() const {
if (CheckFeatureSupported(SupportTags::PerformanceMetricsDataFormatVersion2, user_revision)) {
return 2;
}
return 1;
}
bool BehaviorInfo::IsVoicePitchAndSrcSkippedSupported() const {
return CheckFeatureSupported(SupportTags::VoicePitchAndSrcSkipped, user_revision);
}
bool BehaviorInfo::IsVoicePlayedSampleCountResetAtLoopPointSupported() const {
return CheckFeatureSupported(SupportTags::VoicePlayedSampleCountResetAtLoopPoint,
user_revision);
}
bool BehaviorInfo::IsBiquadFilterEffectStateClaerBugFixed() const {
return CheckFeatureSupported(SupportTags::BiquadFilterEffectStateClearBugFix, user_revision);
}
bool BehaviorInfo::IsVolumeMixParameterPrecisionQ23Supported() const {
return CheckFeatureSupported(SupportTags::VolumeMixParameterPrecisionQ23, user_revision);
}
bool BehaviorInfo::UseBiquadFilterFloatProcessing() const {
return CheckFeatureSupported(SupportTags::BiquadFilterFloatProcessing, user_revision);
}
bool BehaviorInfo::IsMixInParameterDirtyOnlyUpdateSupported() const {
return CheckFeatureSupported(SupportTags::MixInParameterDirtyOnlyUpdate, user_revision);
}
bool BehaviorInfo::UseMultiTapBiquadFilterProcessing() const {
return CheckFeatureSupported(SupportTags::MultiTapBiquadFilterProcessing, user_revision);
}
bool BehaviorInfo::IsDeviceApiVersion2Supported() const {
return CheckFeatureSupported(SupportTags::DeviceApiVersion2, user_revision);
}
} // namespace AudioCore::AudioRenderer
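
The REV value returned by GetProcessRevision/GetUserRevision above is four packed characters, 'R', 'E', 'V' and '0' offset by the revision number, which is why revision 10 prints as REV: and 11 as REV;. A standalone illustration follows; make_magic is a local stand-in for Common::MakeMagic, not the yuzu function.

#include <cstdint>
#include <cstdio>

// Packs four characters little-endian, low byte first, as an illustration only.
constexpr uint32_t make_magic(char a, char b, char c, char d) {
    return static_cast<uint32_t>(static_cast<uint8_t>(a)) |
           (static_cast<uint32_t>(static_cast<uint8_t>(b)) << 8) |
           (static_cast<uint32_t>(static_cast<uint8_t>(c)) << 16) |
           (static_cast<uint32_t>(static_cast<uint8_t>(d)) << 24);
}

int main() {
    for (uint32_t rev = 8; rev <= 11; rev++) {
        const uint32_t magic = make_magic('R', 'E', 'V', static_cast<char>('0' + rev));
        std::printf("revision %2u -> %c%c%c%c (0x%08X)\n", rev,
                    static_cast<char>(magic & 0xFF), static_cast<char>((magic >> 8) & 0xFF),
                    static_cast<char>((magic >> 16) & 0xFF),
                    static_cast<char>((magic >> 24) & 0xFF), magic);
    }
    return 0;
}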

View file

@ -0,0 +1,337 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <span>
#include "audio_core/common/common.h"
#include "common/common_types.h"
#include "core/hle/service/audio/errors.h"
namespace AudioCore::AudioRenderer {
/**
* Holds host and user revisions, checks whether render features can be enabled, and reports errors.
*/
class BehaviorInfo {
static constexpr u32 MaxErrors = 10;
public:
struct ErrorInfo {
/* 0x00 */ Result error_code{0};
/* 0x04 */ u32 unk_04;
/* 0x08 */ CpuAddr address;
};
static_assert(sizeof(ErrorInfo) == 0x10, "BehaviorInfo::ErrorInfo has the wrong size!");
struct Flags {
u64 IsMemoryForceMappingEnabled : 1;
};
struct InParameter {
/* 0x00 */ u32 revision;
/* 0x08 */ Flags flags;
};
static_assert(sizeof(InParameter) == 0x10, "BehaviorInfo::InParameter has the wrong size!");
struct OutStatus {
/* 0x00 */ std::array<ErrorInfo, MaxErrors> errors;
/* 0xA0 */ u32 error_count;
/* 0xA4 */ char unkA4[0xC];
};
static_assert(sizeof(OutStatus) == 0xB0, "BehaviorInfo::OutStatus has the wrong size!");
BehaviorInfo();
/**
* Get the host revision as a number.
*
* @return The host revision.
*/
u32 GetProcessRevisionNum() const;
/**
* Get the host revision in chars, e.g REV8.
* Rev 10 and higher use the ASCII characters above '9'.
* E.g:
* Rev 10 = REV:
* Rev 11 = REV;
*
* @return The host revision.
*/
u32 GetProcessRevision() const;
/**
* Get the user revision as a number.
*
* @return The user revision.
*/
u32 GetUserRevisionNum() const;
/**
* Get the user revision in chars, e.g REV8.
* Rev 10 and higher use the ASCII characters above '9', e.g. REV: and REV;.
*
* @return The user revision.
*/
u32 GetUserRevision() const;
/**
* Set the user revision.
*
* @param user_revision - The user's revision.
*/
void SetUserLibRevision(u32 user_revision);
/**
* Clear the current error count.
*/
void ClearError();
/**
* Append an error to the error list.
*
* @param error - The new error.
*/
void AppendError(ErrorInfo& error);
/**
* Copy errors to the given output container.
*
* @param out_errors - Output container to receive the errors.
* @param out_count - The number of errors written.
*/
void CopyErrorInfo(std::span<ErrorInfo> out_errors, u32& out_count);
/**
* Update the behaviour flags.
*
* @param flags - New flags to use.
*/
void UpdateFlags(Flags flags);
/**
* Check if memory pools can be forcibly mapped.
*
* @return True if enabled, otherwise false.
*/
bool IsMemoryForceMappingEnabled() const;
/**
* Check if the ADPCM context bug is fixed.
* The ADPCM context was not being sent to the AudioRenderer, leading to incorrect scaling being
* used.
*
* @return True if fixed, otherwise false.
*/
bool IsAdpcmLoopContextBugFixed() const;
/**
* Check if the splitter is supported.
*
* @return True if supported, otherwise false.
*/
bool IsSplitterSupported() const;
/**
* Check if the splitter bug is fixed.
* Update is given the wrong number of splitter destinations, leading to invalid data
* being processed.
*
* @return True if fixed, otherwise false.
*/
bool IsSplitterBugFixed() const;
/**
* Check if effects version 2 are supported.
* This gives support for returning effect states from the AudioRenderer, currently only used
* for Limiter statistics.
*
* @return True if supported, otherwise false.
*/
bool IsEffectInfoVersion2Supported() const;
/**
* Check if a variadic command buffer is supported.
* As of Rev 5 with the added optional performance metric logging, the command
* buffer can be a variable size, so take that into account for calculating its size.
*
* @return True if supported, otherwise false.
*/
bool IsVariadicCommandBufferSizeSupported() const;
/**
* Check if wave buffers version 2 are supported.
* See WaveBufferVersion1 and WaveBufferVersion2.
*
* @return True if supported, otherwise false.
*/
bool IsWaveBufferVer2Supported() const;
/**
* Check if long size pre delay is supported.
* This allows a longer initial delay time for the Reverb command.
*
* @return True if supported, otherwise false.
*/
bool IsLongSizePreDelaySupported() const;
/**
* Check if the command time estimator version 2 is supported.
*
* @return True if supported, otherwise false.
*/
bool IsCommandProcessingTimeEstimatorVersion2Supported() const;
/**
* Check if the command time estimator version 3 is supported.
*
* @return True if supported, otherwise false.
*/
bool IsCommandProcessingTimeEstimatorVersion3Supported() const;
/**
* Check if the command time estimator version 4 is supported.
*
* @return True if supported, otherwise false.
*/
bool IsCommandProcessingTimeEstimatorVersion4Supported() const;
/**
* Check if the AudioRenderer can use up to 70% of the allocated processing timeslice.
* Note: Name is correct, Nintendo have the typo here
*
* @return True if supported, otherwise false.
*/
bool IsAudioRenererProcessingTimeLimit70PercentSupported() const;
/**
* Check if the AudioRenderer can use up to 75% of the allocated processing timeslice.
* Note: Name is correct, Nintendo have the typo here
*
* @return True if supported, otherwise false.
*/
bool IsAudioRenererProcessingTimeLimit75PercentSupported() const;
/**
* Check if the AudioRenderer can use up to 80% of the allocated processing timeslice.
* Note: Name is correct, Nintendo have the typo here
*
* @return True if supported, otherwise false.
*/
bool IsAudioRenererProcessingTimeLimit80PercentSupported() const;
/**
* Check if voice flushing is supported.
* This allows low-priority voices to be dropped if the AudioRenderer is running behind.
*
* @return True if supported, otherwise false.
*/
bool IsFlushVoiceWaveBuffersSupported() const;
/**
* Check if counting the number of elapsed frames is supported.
* This adds extra output to RequestUpdate, returning the number of times the AudioRenderer
* processed a command list.
*
* @return True if supported, otherwise false.
*/
bool IsElapsedFrameCountSupported() const;
/**
* Check if performance metrics version 2 are supported.
* This selects the version 2 data format for the performance metrics output of RequestUpdate
* (Unused?).
*
* @return True if supported, otherwise false.
*/
bool IsPerformanceMetricsDataFormatVersion2Supported() const;
/**
* Get the supported performance metrics version.
* Version 2 logs some extra fields in output, such as number of voices dropped,
* processing start time, if the AudioRenderer exceeded its time, etc.
*
* @return Version supported, either 1 or 2.
*/
size_t GetPerformanceMetricsDataFormat() const;
/**
* Check if skipping voice pitch and sample rate conversion is supported.
* This speeds up the data source commands by skipping resampling if unwanted.
* See AudioCore::AudioRenderer::DecodeFromWaveBuffers
*
* @return True if supported, otherwise false.
*/
bool IsVoicePitchAndSrcSkippedSupported() const;
/**
* Check if resetting played sample count at loop points is supported.
* This resets the number of samples played in a voice state when a loop point is reached.
* See AudioCore::AudioRenderer::DecodeFromWaveBuffers
*
* @return True if supported, otherwise false.
*/
bool IsVoicePlayedSampleCountResetAtLoopPointSupported() const;
/**
* Check if the clear state bug for biquad filters is fixed.
* The biquad state was not marked as needing re-initialisation when the effect was updated; it
* was only initialized once with a new effect.
* Note: Name is correct, Nintendo have the typo here
*
* @return True if fixed, otherwise false.
*/
bool IsBiquadFilterEffectStateClaerBugFixed() const;
/**
* Check if Q23 precision is supported for fixed point.
*
* @return True if supported, otherwise false.
*/
bool IsVolumeMixParameterPrecisionQ23Supported() const;
/**
* Check if float processing for biquad filters is supported.
*
* @return True if supported, otherwise false.
*/
bool UseBiquadFilterFloatProcessing() const;
/**
* Check if dirty-only mix updates are supported.
* This saves a lot of buffer size as mixes can be large and not change much.
*
* @return True if supported, otherwise false.
*/
bool IsMixInParameterDirtyOnlyUpdateSupported() const;
/**
* Check if multi-tap biquad filters are supported.
*
* @return True if supported, otherwise false.
*/
bool UseMultiTapBiquadFilterProcessing() const;
/**
* Check if device api version 2 is supported.
* In the SDK but not in any sysmodule? Not sure, left here for completeness anyway.
*
* @return True if supported, otherwise false.
*/
bool IsDeviceApiVersion2Supported() const;
/// Host version
u32 process_revision;
/// User version
u32 user_revision{};
/// Behaviour flags
Flags flags{};
/// Errors generated and reported during Update
std::array<ErrorInfo, MaxErrors> errors{};
/// Error count
u32 error_count{};
};
} // namespace AudioCore::AudioRenderer
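
A minimal sketch of how render code consumes this class, following the feature-gating pattern used elsewhere in this commit: set the user revision once, then branch on the checks. The wrapper function and the guest_revision_magic parameter are illustrative.

// Illustrative only; mirrors the feature-gating pattern used elsewhere in this commit.
void ConfigureForRevision(AudioCore::AudioRenderer::BehaviorInfo& behaviour,
                          u32 guest_revision_magic) {
    behaviour.SetUserLibRevision(guest_revision_magic); // the 'REVx' value from the guest
    if (behaviour.IsEffectInfoVersion2Supported()) {
        // use the version 2 effect parameter/status layout
    } else {
        // fall back to the version 1 layout
    }
    if (behaviour.UseBiquadFilterFloatProcessing()) {
        // generate float biquad filter commands instead of fixed-point ones
    }
}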

View file

@ -0,0 +1,539 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/common/feature_support.h"
#include "audio_core/renderer/behavior/behavior_info.h"
#include "audio_core/renderer/behavior/info_updater.h"
#include "audio_core/renderer/effect/effect_context.h"
#include "audio_core/renderer/effect/effect_reset.h"
#include "audio_core/renderer/memory/memory_pool_info.h"
#include "audio_core/renderer/mix/mix_context.h"
#include "audio_core/renderer/performance/performance_manager.h"
#include "audio_core/renderer/sink/circular_buffer_sink_info.h"
#include "audio_core/renderer/sink/device_sink_info.h"
#include "audio_core/renderer/sink/sink_context.h"
#include "audio_core/renderer/splitter/splitter_context.h"
#include "audio_core/renderer/voice/voice_context.h"
namespace AudioCore::AudioRenderer {
InfoUpdater::InfoUpdater(std::span<const u8> input_, std::span<u8> output_,
const u32 process_handle_, BehaviorInfo& behaviour_)
: input{input_.data() + sizeof(UpdateDataHeader)},
input_origin{input_}, output{output_.data() + sizeof(UpdateDataHeader)},
output_origin{output_}, in_header{reinterpret_cast<const UpdateDataHeader*>(
input_origin.data())},
out_header{reinterpret_cast<UpdateDataHeader*>(output_origin.data())},
expected_input_size{input_.size()}, expected_output_size{output_.size()},
process_handle{process_handle_}, behaviour{behaviour_} {
std::construct_at<UpdateDataHeader>(out_header, behaviour.GetProcessRevision());
}
Result InfoUpdater::UpdateVoiceChannelResources(VoiceContext& voice_context) {
const auto voice_count{voice_context.GetCount()};
std::span<const VoiceChannelResource::InParameter> in_params{
reinterpret_cast<const VoiceChannelResource::InParameter*>(input), voice_count};
for (u32 i = 0; i < voice_count; i++) {
auto& resource{voice_context.GetChannelResource(i)};
resource.in_use = in_params[i].in_use;
if (in_params[i].in_use) {
resource.mix_volumes = in_params[i].mix_volumes;
}
}
const auto consumed_input_size{voice_count *
static_cast<u32>(sizeof(VoiceChannelResource::InParameter))};
if (consumed_input_size != in_header->voice_resources_size) {
LOG_ERROR(Service_Audio,
"Consumed an incorrect voice resource size, header size={}, consumed={}",
in_header->voice_resources_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += consumed_input_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateVoices(VoiceContext& voice_context,
std::span<MemoryPoolInfo> memory_pools,
const u32 memory_pool_count) {
const PoolMapper pool_mapper(process_handle, memory_pools, memory_pool_count,
behaviour.IsMemoryForceMappingEnabled());
const auto voice_count{voice_context.GetCount()};
std::span<const VoiceInfo::InParameter> in_params{
reinterpret_cast<const VoiceInfo::InParameter*>(input), voice_count};
std::span<VoiceInfo::OutStatus> out_params{reinterpret_cast<VoiceInfo::OutStatus*>(output),
voice_count};
for (u32 i = 0; i < voice_count; i++) {
auto& voice_info{voice_context.GetInfo(i)};
voice_info.in_use = false;
}
u32 new_voice_count{0};
for (u32 i = 0; i < voice_count; i++) {
const auto& in_param{in_params[i]};
std::array<VoiceState*, MaxChannels> voice_states{};
if (!in_param.in_use) {
continue;
}
auto& voice_info{voice_context.GetInfo(in_param.id)};
for (u32 channel = 0; channel < in_param.channel_count; channel++) {
voice_states[channel] = &voice_context.GetState(in_param.channel_resource_ids[channel]);
}
if (in_param.is_new) {
voice_info.Initialize();
for (u32 channel = 0; channel < in_param.channel_count; channel++) {
std::memset(voice_states[channel], 0, sizeof(VoiceState));
}
}
BehaviorInfo::ErrorInfo update_error{};
voice_info.UpdateParameters(update_error, in_param, pool_mapper, behaviour);
if (!update_error.error_code.IsSuccess()) {
behaviour.AppendError(update_error);
}
std::array<std::array<BehaviorInfo::ErrorInfo, 2>, MaxWaveBuffers> wavebuffer_errors{};
voice_info.UpdateWaveBuffers(wavebuffer_errors, MaxWaveBuffers * 2, in_param, voice_states,
pool_mapper, behaviour);
for (auto& wavebuffer_error : wavebuffer_errors) {
for (auto& error : wavebuffer_error) {
if (error.error_code.IsError()) {
behaviour.AppendError(error);
}
}
}
voice_info.WriteOutStatus(out_params[i], in_param, voice_states);
new_voice_count += in_param.channel_count;
}
auto consumed_input_size{voice_count * static_cast<u32>(sizeof(VoiceInfo::InParameter))};
auto consumed_output_size{voice_count * static_cast<u32>(sizeof(VoiceInfo::OutStatus))};
if (consumed_input_size != in_header->voices_size) {
LOG_ERROR(Service_Audio, "Consumed an incorrect voices size, header size={}, consumed={}",
in_header->voices_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
out_header->voices_size = consumed_output_size;
out_header->size += consumed_output_size;
input += consumed_input_size;
output += consumed_output_size;
voice_context.SetActiveCount(new_voice_count);
return ResultSuccess;
}
Result InfoUpdater::UpdateEffects(EffectContext& effect_context, const bool renderer_active,
std::span<MemoryPoolInfo> memory_pools,
const u32 memory_pool_count) {
if (behaviour.IsEffectInfoVersion2Supported()) {
return UpdateEffectsVersion2(effect_context, renderer_active, memory_pools,
memory_pool_count);
} else {
return UpdateEffectsVersion1(effect_context, renderer_active, memory_pools,
memory_pool_count);
}
}
Result InfoUpdater::UpdateEffectsVersion1(EffectContext& effect_context, const bool renderer_active,
std::span<MemoryPoolInfo> memory_pools,
const u32 memory_pool_count) {
PoolMapper pool_mapper(process_handle, memory_pools, memory_pool_count,
behaviour.IsMemoryForceMappingEnabled());
const auto effect_count{effect_context.GetCount()};
std::span<const EffectInfoBase::InParameterVersion1> in_params{
reinterpret_cast<const EffectInfoBase::InParameterVersion1*>(input), effect_count};
std::span<EffectInfoBase::OutStatusVersion1> out_params{
reinterpret_cast<EffectInfoBase::OutStatusVersion1*>(output), effect_count};
for (u32 i = 0; i < effect_count; i++) {
auto effect_info{&effect_context.GetInfo(i)};
if (effect_info->GetType() != in_params[i].type) {
effect_info->ForceUnmapBuffers(pool_mapper);
ResetEffect(effect_info, in_params[i].type);
}
BehaviorInfo::ErrorInfo error_info{};
effect_info->Update(error_info, in_params[i], pool_mapper);
if (error_info.error_code.IsError()) {
behaviour.AppendError(error_info);
}
effect_info->StoreStatus(out_params[i], renderer_active);
}
auto consumed_input_size{effect_count *
static_cast<u32>(sizeof(EffectInfoBase::InParameterVersion1))};
auto consumed_output_size{effect_count *
static_cast<u32>(sizeof(EffectInfoBase::OutStatusVersion1))};
if (consumed_input_size != in_header->effects_size) {
LOG_ERROR(Service_Audio, "Consumed an incorrect effects size, header size={}, consumed={}",
in_header->effects_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
out_header->effects_size = consumed_output_size;
out_header->size += consumed_output_size;
input += consumed_input_size;
output += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateEffectsVersion2(EffectContext& effect_context, const bool renderer_active,
std::span<MemoryPoolInfo> memory_pools,
const u32 memory_pool_count) {
PoolMapper pool_mapper(process_handle, memory_pools, memory_pool_count,
behaviour.IsMemoryForceMappingEnabled());
const auto effect_count{effect_context.GetCount()};
std::span<const EffectInfoBase::InParameterVersion2> in_params{
reinterpret_cast<const EffectInfoBase::InParameterVersion2*>(input), effect_count};
std::span<EffectInfoBase::OutStatusVersion2> out_params{
reinterpret_cast<EffectInfoBase::OutStatusVersion2*>(output), effect_count};
for (u32 i = 0; i < effect_count; i++) {
auto effect_info{&effect_context.GetInfo(i)};
if (effect_info->GetType() != in_params[i].type) {
effect_info->ForceUnmapBuffers(pool_mapper);
ResetEffect(effect_info, in_params[i].type);
}
BehaviorInfo::ErrorInfo error_info{};
effect_info->Update(error_info, in_params[i], pool_mapper);
if (error_info.error_code.IsError()) {
behaviour.AppendError(error_info);
}
effect_info->StoreStatus(out_params[i], renderer_active);
if (in_params[i].is_new) {
effect_info->InitializeResultState(effect_context.GetDspSharedResultState(i));
effect_info->InitializeResultState(effect_context.GetResultState(i));
}
effect_info->UpdateResultState(out_params[i].result_state,
effect_context.GetResultState(i));
}
auto consumed_input_size{effect_count *
static_cast<u32>(sizeof(EffectInfoBase::InParameterVersion2))};
auto consumed_output_size{effect_count *
static_cast<u32>(sizeof(EffectInfoBase::OutStatusVersion2))};
if (consumed_input_size != in_header->effects_size) {
LOG_ERROR(Service_Audio, "Consumed an incorrect effects size, header size={}, consumed={}",
in_header->effects_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
out_header->effects_size = consumed_output_size;
out_header->size += consumed_output_size;
input += consumed_input_size;
output += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateMixes(MixContext& mix_context, const u32 mix_buffer_count,
EffectContext& effect_context, SplitterContext& splitter_context) {
s32 mix_count{0};
u32 consumed_input_size{0};
if (behaviour.IsMixInParameterDirtyOnlyUpdateSupported()) {
auto in_dirty_params{reinterpret_cast<const MixInfo::InDirtyParameter*>(input)};
mix_count = in_dirty_params->count;
input += sizeof(MixInfo::InDirtyParameter);
consumed_input_size = static_cast<u32>(sizeof(MixInfo::InDirtyParameter) +
mix_count * sizeof(MixInfo::InParameter));
} else {
mix_count = mix_context.GetCount();
consumed_input_size = static_cast<u32>(mix_count * sizeof(MixInfo::InParameter));
}
if (mix_buffer_count == 0) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
std::span<const MixInfo::InParameter> in_params{
reinterpret_cast<const MixInfo::InParameter*>(input), static_cast<size_t>(mix_count)};
u32 total_buffer_count{0};
for (s32 i = 0; i < mix_count; i++) {
const auto& params{in_params[i]};
if (params.in_use) {
total_buffer_count += params.buffer_count;
if (params.dest_mix_id > static_cast<s32>(mix_context.GetCount()) &&
params.dest_mix_id != UnusedMixId && params.mix_id != FinalMixId) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
}
}
if (total_buffer_count > mix_buffer_count) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
bool mix_dirty{false};
for (s32 i = 0; i < mix_count; i++) {
const auto& params{in_params[i]};
s32 mix_id{i};
if (behaviour.IsMixInParameterDirtyOnlyUpdateSupported()) {
mix_id = params.mix_id;
}
auto mix_info{mix_context.GetInfo(mix_id)};
if (mix_info->in_use != params.in_use) {
mix_info->in_use = params.in_use;
if (!params.in_use) {
mix_info->ClearEffectProcessingOrder();
}
mix_dirty = true;
}
if (params.in_use) {
mix_dirty |= mix_info->Update(mix_context.GetEdgeMatrix(), params, effect_context,
splitter_context, behaviour);
}
}
if (mix_dirty) {
if (behaviour.IsSplitterSupported() && splitter_context.UsingSplitter()) {
if (!mix_context.TSortInfo(splitter_context)) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
} else {
mix_context.SortInfo();
}
}
if (consumed_input_size != in_header->mix_size) {
LOG_ERROR(Service_Audio, "Consumed an incorrect mixes size, header size={}, consumed={}",
in_header->mix_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += mix_count * sizeof(MixInfo::InParameter);
return ResultSuccess;
}
Result InfoUpdater::UpdateSinks(SinkContext& sink_context, std::span<MemoryPoolInfo> memory_pools,
const u32 memory_pool_count) {
PoolMapper pool_mapper(process_handle, memory_pools, memory_pool_count,
behaviour.IsMemoryForceMappingEnabled());
std::span<const SinkInfoBase::InParameter> in_params{
reinterpret_cast<const SinkInfoBase::InParameter*>(input), memory_pool_count};
std::span<SinkInfoBase::OutStatus> out_params{
reinterpret_cast<SinkInfoBase::OutStatus*>(output), memory_pool_count};
const auto sink_count{sink_context.GetCount()};
for (u32 i = 0; i < sink_count; i++) {
const auto& params{in_params[i]};
auto sink_info{sink_context.GetInfo(i)};
if (sink_info->GetType() != params.type) {
sink_info->CleanUp();
switch (params.type) {
case SinkInfoBase::Type::Invalid:
std::construct_at<SinkInfoBase>(reinterpret_cast<SinkInfoBase*>(sink_info));
break;
case SinkInfoBase::Type::DeviceSink:
std::construct_at<DeviceSinkInfo>(reinterpret_cast<DeviceSinkInfo*>(sink_info));
break;
case SinkInfoBase::Type::CircularBufferSink:
std::construct_at<CircularBufferSinkInfo>(
reinterpret_cast<CircularBufferSinkInfo*>(sink_info));
break;
default:
LOG_ERROR(Service_Audio, "Invalid sink type {}", static_cast<u32>(params.type));
break;
}
}
BehaviorInfo::ErrorInfo error_info{};
sink_info->Update(error_info, out_params[i], params, pool_mapper);
if (error_info.error_code.IsError()) {
behaviour.AppendError(error_info);
}
}
const auto consumed_input_size{sink_count *
static_cast<u32>(sizeof(SinkInfoBase::InParameter))};
const auto consumed_output_size{sink_count * static_cast<u32>(sizeof(SinkInfoBase::OutStatus))};
if (consumed_input_size != in_header->sinks_size) {
LOG_ERROR(Service_Audio, "Consumed an incorrect sinks size, header size={}, consumed={}",
in_header->sinks_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += consumed_input_size;
output += consumed_output_size;
out_header->sinks_size = consumed_output_size;
out_header->size += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateMemoryPools(std::span<MemoryPoolInfo> memory_pools,
const u32 memory_pool_count) {
PoolMapper pool_mapper(process_handle, memory_pools, memory_pool_count,
behaviour.IsMemoryForceMappingEnabled());
std::span<const MemoryPoolInfo::InParameter> in_params{
reinterpret_cast<const MemoryPoolInfo::InParameter*>(input), memory_pool_count};
std::span<MemoryPoolInfo::OutStatus> out_params{
reinterpret_cast<MemoryPoolInfo::OutStatus*>(output), memory_pool_count};
for (size_t i = 0; i < memory_pool_count; i++) {
auto state{pool_mapper.Update(memory_pools[i], in_params[i], out_params[i])};
if (state != MemoryPoolInfo::ResultState::Success &&
state != MemoryPoolInfo::ResultState::BadParam &&
state != MemoryPoolInfo::ResultState::MapFailed &&
state != MemoryPoolInfo::ResultState::InUse) {
LOG_WARNING(Service_Audio, "Invalid ResultState from updating memory pools");
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
}
const auto consumed_input_size{memory_pool_count *
static_cast<u32>(sizeof(MemoryPoolInfo::InParameter))};
const auto consumed_output_size{memory_pool_count *
static_cast<u32>(sizeof(MemoryPoolInfo::OutStatus))};
if (consumed_input_size != in_header->memory_pool_size) {
LOG_ERROR(Service_Audio,
"Consumed an incorrect memory pool size, header size={}, consumed={}",
in_header->memory_pool_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += consumed_input_size;
output += consumed_output_size;
out_header->memory_pool_size = consumed_output_size;
out_header->size += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::UpdatePerformanceBuffer(std::span<u8> performance_output,
const u64 performance_output_size,
PerformanceManager* performance_manager) {
auto in_params{reinterpret_cast<const PerformanceManager::InParameter*>(input)};
auto out_params{reinterpret_cast<PerformanceManager::OutStatus*>(output)};
if (performance_manager != nullptr) {
out_params->history_size =
performance_manager->CopyHistories(performance_output.data(), performance_output_size);
performance_manager->SetDetailTarget(in_params->target_node_id);
} else {
out_params->history_size = 0;
}
const auto consumed_input_size{static_cast<u32>(sizeof(PerformanceManager::InParameter))};
const auto consumed_output_size{static_cast<u32>(sizeof(PerformanceManager::OutStatus))};
if (consumed_input_size != in_header->performance_buffer_size) {
LOG_ERROR(Service_Audio,
"Consumed an incorrect performance size, header size={}, consumed={}",
in_header->performance_buffer_size, consumed_input_size);
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += consumed_input_size;
output += consumed_output_size;
out_header->performance_buffer_size = consumed_output_size;
out_header->size += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateBehaviorInfo(BehaviorInfo& behaviour_) {
const auto in_params{reinterpret_cast<const BehaviorInfo::InParameter*>(input)};
if (!CheckValidRevision(in_params->revision)) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
if (in_params->revision != behaviour_.GetUserRevision()) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
behaviour_.ClearError();
behaviour_.UpdateFlags(in_params->flags);
if (in_header->behaviour_size != sizeof(BehaviorInfo::InParameter)) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += sizeof(BehaviorInfo::InParameter);
return ResultSuccess;
}
Result InfoUpdater::UpdateErrorInfo(BehaviorInfo& behaviour_) {
auto out_params{reinterpret_cast<BehaviorInfo::OutStatus*>(output)};
behaviour_.CopyErrorInfo(out_params->errors, out_params->error_count);
const auto consumed_output_size{static_cast<u32>(sizeof(BehaviorInfo::OutStatus))};
output += consumed_output_size;
out_header->behaviour_size = consumed_output_size;
out_header->size += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateSplitterInfo(SplitterContext& splitter_context) {
u32 consumed_size{0};
if (!splitter_context.Update(input, consumed_size)) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
input += consumed_size;
return ResultSuccess;
}
Result InfoUpdater::UpdateRendererInfo(const u64 elapsed_frames) {
struct RenderInfo {
/* 0x00 */ u64 frames_elapsed;
/* 0x08 */ char unk08[0x8];
};
static_assert(sizeof(RenderInfo) == 0x10, "RenderInfo has the wrong size!");
auto out_params{reinterpret_cast<RenderInfo*>(output)};
out_params->frames_elapsed = elapsed_frames;
const auto consumed_output_size{static_cast<u32>(sizeof(RenderInfo))};
output += consumed_output_size;
out_header->render_info_size = consumed_output_size;
out_header->size += consumed_output_size;
return ResultSuccess;
}
Result InfoUpdater::CheckConsumedSize() {
if (CpuAddr(input) - CpuAddr(input_origin.data()) != expected_input_size) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
} else if (CpuAddr(output) - CpuAddr(output_origin.data()) != expected_output_size) {
return Service::Audio::ERR_INVALID_UPDATE_DATA;
}
return ResultSuccess;
}
} // namespace AudioCore::AudioRenderer
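
Every Update* above follows the same bookkeeping: reinterpret a typed span at the current input cursor, compute count * sizeof(T), compare that against the size recorded in the input header, then advance the cursor (and grow the output header on the write side). A stripped-down, self-contained version of the input half, with hypothetical types:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <span>

// Hypothetical, simplified mirror of the consumed-size checks above; none of these
// types are the yuzu ones.
struct FakeSectionHeader {
    uint32_t section_size; // bytes the guest claims it wrote for this section
};
struct FakeParam {
    uint32_t value;
};

// Returns false on a size mismatch, which the real updater turns into
// ERR_INVALID_UPDATE_DATA.
bool ConsumeSection(const uint8_t*& input, const FakeSectionHeader& header, uint32_t count,
                    std::span<FakeParam> out) {
    const uint32_t consumed = count * static_cast<uint32_t>(sizeof(FakeParam));
    if (consumed != header.section_size) {
        return false;
    }
    const size_t to_copy = std::min<size_t>(consumed, out.size_bytes());
    std::memcpy(out.data(), input, to_copy);
    input += consumed; // advance the cursor past this section
    return true;
}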

View file

@ -0,0 +1,205 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "common/common_types.h"
#include "core/hle/service/audio/errors.h"
namespace AudioCore::AudioRenderer {
class BehaviorInfo;
class VoiceContext;
class MixContext;
class SinkContext;
class SplitterContext;
class EffectContext;
class MemoryPoolInfo;
class PerformanceManager;
class InfoUpdater {
struct UpdateDataHeader {
explicit UpdateDataHeader(u32 revision_) : revision{revision_} {}
/* 0x00 */ u32 revision;
/* 0x04 */ u32 behaviour_size{};
/* 0x08 */ u32 memory_pool_size{};
/* 0x0C */ u32 voices_size{};
/* 0x10 */ u32 voice_resources_size{};
/* 0x14 */ u32 effects_size{};
/* 0x18 */ u32 mix_size{};
/* 0x1C */ u32 sinks_size{};
/* 0x20 */ u32 performance_buffer_size{};
/* 0x24 */ char unk24[4];
/* 0x28 */ u32 render_info_size{};
/* 0x2C */ char unk2C[0x10];
/* 0x3C */ u32 size{sizeof(UpdateDataHeader)};
};
static_assert(sizeof(UpdateDataHeader) == 0x40, "UpdateDataHeader has the wrong size!");
public:
explicit InfoUpdater(std::span<const u8> input, std::span<u8> output, u32 process_handle,
BehaviorInfo& behaviour);
/**
* Update the voice channel resources.
*
* @param voice_context - Voice context to update.
* @return Result code.
*/
Result UpdateVoiceChannelResources(VoiceContext& voice_context);
/**
* Update voices.
*
* @param voice_context - Voice context to update.
* @param memory_pools - Memory pools to use for these voices.
* @param memory_pool_count - Number of memory pools.
* @return Result code.
*/
Result UpdateVoices(VoiceContext& voice_context, std::span<MemoryPoolInfo> memory_pools,
u32 memory_pool_count);
/**
* Update effects.
*
* @param effect_context - Effect context to update.
* @param renderer_active - Whether the AudioRenderer is active.
* @param memory_pools - Memory pools to use for these voices.
* @param memory_pool_count - Number of memory pools.
* @return Result code.
*/
Result UpdateEffects(EffectContext& effect_context, bool renderer_active,
std::span<MemoryPoolInfo> memory_pools, u32 memory_pool_count);
/**
* Update mixes.
*
* @param mix_context - Mix context to update.
* @param mix_buffer_count - Number of mix buffers.
* @param effect_context - Effect context, used to update the effect processing order.
* @param splitter_context - Splitter context for the mixes.
* @return Result code.
*/
Result UpdateMixes(MixContext& mix_context, u32 mix_buffer_count, EffectContext& effect_context,
SplitterContext& splitter_context);
/**
* Update sinks.
*
* @param sink_context - Sink context to update.
* @param memory_pools - Memory pools to use for these voices.
* @param memory_pool_count - Number of memory pools.
* @return Result code.
*/
Result UpdateSinks(SinkContext& sink_context, std::span<MemoryPoolInfo> memory_pools,
u32 memory_pool_count);
/**
* Update memory pools.
*
* @param memory_pools - Memory pools to use for these voices.
* @param memory_pool_count - Number of memory pools.
* @return Result code.
*/
Result UpdateMemoryPools(std::span<MemoryPoolInfo> memory_pools, u32 memory_pool_count);
/**
* Update the performance buffer.
*
* @param output - Output buffer for performance metrics.
* @param output_size - Output buffer size.
* @param performance_manager - Performance manager.
* @return Result code.
*/
Result UpdatePerformanceBuffer(std::span<u8> output, u64 output_size,
PerformanceManager* performance_manager);
/**
* Update behaviour.
*
* @param behaviour - Behaviour to update.
* @return Result code.
*/
Result UpdateBehaviorInfo(BehaviorInfo& behaviour);
/**
* Update errors.
*
* @param behaviour - Behaviour to update.
* @return Result code.
*/
Result UpdateErrorInfo(BehaviorInfo& behaviour);
/**
* Update splitter.
*
* @param splitter_context - Splitter context to update.
* @return Result code.
*/
Result UpdateSplitterInfo(SplitterContext& splitter_context);
/**
* Update renderer info.
*
* @param elapsed_frames - Number of elapsed frames.
* @return Result code.
*/
Result UpdateRendererInfo(u64 elapsed_frames);
/**
* Check that the input/output sizes match their expected values.
*
* @return Result code.
*/
Result CheckConsumedSize();
private:
/**
* Update effects version 1.
*
* @param effect_context - Effect context to update.
* @param renderer_active - Is the AudioRenderer active?
* @param memory_pools - Memory pools to use for these voices.
* @param memory_pool_count - Number of memory pools.
* @return Result code.
*/
Result UpdateEffectsVersion1(EffectContext& effect_context, bool renderer_active,
std::span<MemoryPoolInfo> memory_pools, u32 memory_pool_count);
/**
* Update effects version 2.
*
* @param effect_context - Effect context to update.
* @param renderer_active - Is the AudioRenderer active?
* @param memory_pools - Memory pools to use for these voices.
* @param memory_pool_count - Number of memory pools.
* @return Result code.
*/
Result UpdateEffectsVersion2(EffectContext& effect_context, bool renderer_active,
std::span<MemoryPoolInfo> memory_pools, u32 memory_pool_count);
/// Input buffer
u8 const* input;
/// Input buffer start
std::span<const u8> input_origin;
/// Output buffer
u8* output;
/// Output buffer start
std::span<u8> output_origin;
/// Input header
const UpdateDataHeader* in_header;
/// Output header
UpdateDataHeader* out_header;
/// Expected input size, see CheckConsumedSize
u64 expected_input_size;
/// Expected output size, see CheckConsumedSize
u64 expected_output_size;
/// Process handle for this update, passed to the pool mapper
u32 process_handle;
/// Behaviour
BehaviorInfo& behaviour;
};
} // namespace AudioCore::AudioRenderer
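
The output header above starts with size already set to sizeof(UpdateDataHeader), and each section then records its own consumed size and adds it to the running total, as seen throughout the .cpp. A tiny hypothetical mirror of that accumulation:

#include <cstdint>

// Not the yuzu struct; just the size accounting pattern in isolation.
struct FakeOutHeader {
    uint32_t behaviour_size{};
    uint32_t memory_pool_size{};
    uint32_t size{0x40}; // sizeof(UpdateDataHeader) in the real layout
};

void RecordSections(FakeOutHeader& header, uint32_t behaviour_bytes, uint32_t pool_bytes) {
    header.behaviour_size = behaviour_bytes;
    header.size += behaviour_bytes;
    header.memory_pool_size = pool_bytes;
    header.size += pool_bytes; // the final size is the total the renderer wrote back
}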

View file

@ -0,0 +1,678 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/behavior/behavior_info.h"
#include "audio_core/renderer/command/command_buffer.h"
#include "audio_core/renderer/command/command_list_header.h"
#include "audio_core/renderer/command/command_processing_time_estimator.h"
#include "audio_core/renderer/effect/effect_biquad_filter_info.h"
#include "audio_core/renderer/effect/effect_delay_info.h"
#include "audio_core/renderer/effect/effect_reverb_info.h"
#include "audio_core/renderer/memory/memory_pool_info.h"
#include "audio_core/renderer/mix/mix_info.h"
#include "audio_core/renderer/sink/circular_buffer_sink_info.h"
#include "audio_core/renderer/sink/device_sink_info.h"
#include "audio_core/renderer/sink/sink_info_base.h"
#include "audio_core/renderer/voice/voice_info.h"
#include "audio_core/renderer/voice/voice_state.h"
namespace AudioCore::AudioRenderer {
template <typename T, CommandId Id>
T& CommandBuffer::GenerateStart(const s32 node_id) {
if (size + sizeof(T) >= command_list.size_bytes()) {
LOG_ERROR(
Service_Audio,
"Attempting to write commands beyond the end of allocated command buffer memory!");
UNREACHABLE();
}
auto& cmd{*std::construct_at<T>(reinterpret_cast<T*>(&command_list[size]))};
cmd.magic = CommandMagic;
cmd.enabled = true;
cmd.type = Id;
cmd.size = sizeof(T);
cmd.node_id = node_id;
return cmd;
}
template <typename T>
void CommandBuffer::GenerateEnd(T& cmd) {
cmd.estimated_process_time = time_estimator->Estimate(cmd);
estimated_process_time += cmd.estimated_process_time;
size += sizeof(T);
count++;
}
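// The two templates above bracket every generator in this file: GenerateStart
// placement-constructs the command at the current write offset and fills in the
// common header fields (magic, enabled, type, size, node_id), and GenerateEnd asks
// the time estimator for a cost before bumping the running size and count. A
// hypothetical generator, not part of this commit, would follow the same shape:
//
//     void CommandBuffer::GenerateExampleCommand(const s32 node_id) {
//         auto& cmd{GenerateStart<ExampleCommand, CommandId::Example>(node_id)};
//         cmd.some_field = 0; // command-specific setup goes between Start and End
//         GenerateEnd<ExampleCommand>(cmd);
//     }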
void CommandBuffer::GeneratePcmInt16Version1Command(const s32 node_id,
const MemoryPoolInfo& memory_pool_,
VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{
GenerateStart<PcmInt16DataSourceVersion1Command, CommandId::DataSourcePcmInt16Version1>(
node_id)};
cmd.src_quality = voice_info.src_quality;
cmd.output_index = buffer_count + channel;
cmd.flags = voice_info.flags & 3;
cmd.sample_rate = voice_info.sample_rate;
cmd.pitch = voice_info.pitch;
cmd.channel_index = channel;
cmd.channel_count = voice_info.channel_count;
for (u32 i = 0; i < MaxWaveBuffers; i++) {
voice_info.wavebuffers[i].Copy(cmd.wave_buffers[i]);
}
cmd.voice_state = memory_pool_.Translate(CpuAddr(&voice_state), sizeof(VoiceState));
GenerateEnd<PcmInt16DataSourceVersion1Command>(cmd);
}
void CommandBuffer::GeneratePcmInt16Version2Command(const s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{
GenerateStart<PcmInt16DataSourceVersion2Command, CommandId::DataSourcePcmInt16Version2>(
node_id)};
cmd.src_quality = voice_info.src_quality;
cmd.output_index = buffer_count + channel;
cmd.flags = voice_info.flags & 3;
cmd.sample_rate = voice_info.sample_rate;
cmd.pitch = voice_info.pitch;
cmd.channel_index = channel;
cmd.channel_count = voice_info.channel_count;
for (u32 i = 0; i < MaxWaveBuffers; i++) {
voice_info.wavebuffers[i].Copy(cmd.wave_buffers[i]);
}
cmd.voice_state = memory_pool->Translate(CpuAddr(&voice_state), sizeof(VoiceState));
GenerateEnd<PcmInt16DataSourceVersion2Command>(cmd);
}
void CommandBuffer::GeneratePcmFloatVersion1Command(const s32 node_id,
const MemoryPoolInfo& memory_pool_,
VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{
GenerateStart<PcmFloatDataSourceVersion1Command, CommandId::DataSourcePcmFloatVersion1>(
node_id)};
cmd.src_quality = voice_info.src_quality;
cmd.output_index = buffer_count + channel;
cmd.flags = voice_info.flags & 3;
cmd.sample_rate = voice_info.sample_rate;
cmd.pitch = voice_info.pitch;
cmd.channel_index = channel;
cmd.channel_count = voice_info.channel_count;
for (u32 i = 0; i < MaxWaveBuffers; i++) {
voice_info.wavebuffers[i].Copy(cmd.wave_buffers[i]);
}
cmd.voice_state = memory_pool_.Translate(CpuAddr(&voice_state), sizeof(VoiceState));
GenerateEnd<PcmFloatDataSourceVersion1Command>(cmd);
}
void CommandBuffer::GeneratePcmFloatVersion2Command(const s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{
GenerateStart<PcmFloatDataSourceVersion2Command, CommandId::DataSourcePcmFloatVersion2>(
node_id)};
cmd.src_quality = voice_info.src_quality;
cmd.output_index = buffer_count + channel;
cmd.flags = voice_info.flags & 3;
cmd.sample_rate = voice_info.sample_rate;
cmd.pitch = voice_info.pitch;
cmd.channel_index = channel;
cmd.channel_count = voice_info.channel_count;
for (u32 i = 0; i < MaxWaveBuffers; i++) {
voice_info.wavebuffers[i].Copy(cmd.wave_buffers[i]);
}
cmd.voice_state = memory_pool->Translate(CpuAddr(&voice_state), sizeof(VoiceState));
GenerateEnd<PcmFloatDataSourceVersion2Command>(cmd);
}
void CommandBuffer::GenerateAdpcmVersion1Command(const s32 node_id,
const MemoryPoolInfo& memory_pool_,
VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{
GenerateStart<AdpcmDataSourceVersion1Command, CommandId::DataSourceAdpcmVersion1>(node_id)};
cmd.src_quality = voice_info.src_quality;
cmd.output_index = buffer_count + channel;
cmd.flags = voice_info.flags & 3;
cmd.sample_rate = voice_info.sample_rate;
cmd.pitch = voice_info.pitch;
for (u32 i = 0; i < MaxWaveBuffers; i++) {
voice_info.wavebuffers[i].Copy(cmd.wave_buffers[i]);
}
cmd.voice_state = memory_pool_.Translate(CpuAddr(&voice_state), sizeof(VoiceState));
cmd.data_address = voice_info.data_address.GetReference(true);
cmd.data_size = voice_info.data_address.GetSize();
GenerateEnd<AdpcmDataSourceVersion1Command>(cmd);
}
void CommandBuffer::GenerateAdpcmVersion2Command(const s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{
GenerateStart<AdpcmDataSourceVersion2Command, CommandId::DataSourceAdpcmVersion2>(node_id)};
cmd.src_quality = voice_info.src_quality;
cmd.output_index = buffer_count + channel;
cmd.flags = voice_info.flags & 3;
cmd.sample_rate = voice_info.sample_rate;
cmd.pitch = voice_info.pitch;
cmd.channel_index = channel;
cmd.channel_count = voice_info.channel_count;
for (u32 i = 0; i < MaxWaveBuffers; i++) {
voice_info.wavebuffers[i].Copy(cmd.wave_buffers[i]);
}
cmd.voice_state = memory_pool->Translate(CpuAddr(&voice_state), sizeof(VoiceState));
cmd.data_address = voice_info.data_address.GetReference(true);
cmd.data_size = voice_info.data_address.GetSize();
GenerateEnd<AdpcmDataSourceVersion2Command>(cmd);
}
void CommandBuffer::GenerateVolumeCommand(const s32 node_id, const s16 buffer_offset,
const s16 input_index, const f32 volume,
const u8 precision) {
auto& cmd{GenerateStart<VolumeCommand, CommandId::Volume>(node_id)};
cmd.precision = precision;
cmd.input_index = buffer_offset + input_index;
cmd.output_index = buffer_offset + input_index;
cmd.volume = volume;
GenerateEnd<VolumeCommand>(cmd);
}
void CommandBuffer::GenerateVolumeRampCommand(const s32 node_id, VoiceInfo& voice_info,
const s16 buffer_count, const u8 precision) {
auto& cmd{GenerateStart<VolumeRampCommand, CommandId::VolumeRamp>(node_id)};
cmd.input_index = buffer_count;
cmd.output_index = buffer_count;
cmd.prev_volume = voice_info.prev_volume;
cmd.volume = voice_info.volume;
cmd.precision = precision;
GenerateEnd<VolumeRampCommand>(cmd);
}
void CommandBuffer::GenerateBiquadFilterCommand(const s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel,
const u32 biquad_index,
const bool use_float_processing) {
auto& cmd{GenerateStart<BiquadFilterCommand, CommandId::BiquadFilter>(node_id)};
cmd.input = buffer_count + channel;
cmd.output = buffer_count + channel;
cmd.biquad = voice_info.biquads[biquad_index];
cmd.state = memory_pool->Translate(CpuAddr(voice_state.biquad_states[biquad_index].data()),
MaxBiquadFilters * sizeof(VoiceState::BiquadFilterState));
cmd.needs_init = !voice_info.biquad_initialized[biquad_index];
cmd.use_float_processing = use_float_processing;
GenerateEnd<BiquadFilterCommand>(cmd);
}
void CommandBuffer::GenerateBiquadFilterCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 buffer_offset, const s8 channel,
const bool needs_init,
const bool use_float_processing) {
auto& cmd{GenerateStart<BiquadFilterCommand, CommandId::BiquadFilter>(node_id)};
const auto& parameter{
*reinterpret_cast<BiquadFilterInfo::ParameterVersion1*>(effect_info.GetParameter())};
const auto state{
reinterpret_cast<VoiceState::BiquadFilterState*>(effect_info.GetStateBuffer())};
cmd.input = buffer_offset + parameter.inputs[channel];
cmd.output = buffer_offset + parameter.outputs[channel];
cmd.biquad.b = parameter.b;
cmd.biquad.a = parameter.a;
cmd.state = memory_pool->Translate(CpuAddr(state),
MaxBiquadFilters * sizeof(VoiceState::BiquadFilterState));
cmd.needs_init = needs_init;
cmd.use_float_processing = use_float_processing;
GenerateEnd<BiquadFilterCommand>(cmd);
}
void CommandBuffer::GenerateMixCommand(const s32 node_id, const s16 input_index,
const s16 output_index, const s16 buffer_offset,
const f32 volume, const u8 precision) {
auto& cmd{GenerateStart<MixCommand, CommandId::Mix>(node_id)};
cmd.input_index = input_index;
cmd.output_index = output_index;
cmd.volume = volume;
cmd.precision = precision;
GenerateEnd<MixCommand>(cmd);
}
void CommandBuffer::GenerateMixRampCommand(const s32 node_id,
[[maybe_unused]] const s16 buffer_count,
const s16 input_index, const s16 output_index,
const f32 volume, const f32 prev_volume,
const CpuAddr prev_samples, const u8 precision) {
if (volume == 0.0f && prev_volume == 0.0f) {
return;
}
auto& cmd{GenerateStart<MixRampCommand, CommandId::MixRamp>(node_id)};
cmd.input_index = input_index;
cmd.output_index = output_index;
cmd.prev_volume = prev_volume;
cmd.volume = volume;
cmd.previous_sample = prev_samples;
cmd.precision = precision;
GenerateEnd<MixRampCommand>(cmd);
}
void CommandBuffer::GenerateMixRampGroupedCommand(const s32 node_id, const s16 buffer_count,
const s16 input_index, s16 output_index,
std::span<const f32> volumes,
std::span<const f32> prev_volumes,
const CpuAddr prev_samples, const u8 precision) {
auto& cmd{GenerateStart<MixRampGroupedCommand, CommandId::MixRampGrouped>(node_id)};
cmd.buffer_count = buffer_count;
for (s32 i = 0; i < buffer_count; i++) {
cmd.inputs[i] = input_index;
cmd.outputs[i] = output_index++;
cmd.prev_volumes[i] = prev_volumes[i];
cmd.volumes[i] = volumes[i];
}
cmd.previous_samples = prev_samples;
cmd.precision = precision;
GenerateEnd<MixRampGroupedCommand>(cmd);
}
void CommandBuffer::GenerateDepopPrepareCommand(const s32 node_id, const VoiceState& voice_state,
std::span<const s32> buffer, const s16 buffer_count,
s16 buffer_offset, const bool was_playing) {
auto& cmd{GenerateStart<DepopPrepareCommand, CommandId::DepopPrepare>(node_id)};
cmd.enabled = was_playing;
for (u32 i = 0; i < MaxMixBuffers; i++) {
cmd.inputs[i] = buffer_offset++;
}
cmd.previous_samples = memory_pool->Translate(CpuAddr(voice_state.previous_samples.data()),
MaxMixBuffers * sizeof(s32));
cmd.buffer_count = buffer_count;
cmd.depop_buffer = memory_pool->Translate(CpuAddr(buffer.data()), buffer_count * sizeof(s32));
GenerateEnd<DepopPrepareCommand>(cmd);
}
void CommandBuffer::GenerateDepopForMixBuffersCommand(const s32 node_id, const MixInfo& mix_info,
std::span<const s32> depop_buffer) {
auto& cmd{GenerateStart<DepopForMixBuffersCommand, CommandId::DepopForMixBuffers>(node_id)};
cmd.input = mix_info.buffer_offset;
cmd.count = mix_info.buffer_count;
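    // The decay constant differs per sample rate (~0.962 at the 48kHz target rate, ~0.944
    // otherwise, e.g. 32kHz); the values appear chosen so the depop fade lasts roughly the
    // same wall-clock time at either rate.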
cmd.decay = mix_info.sample_rate == TargetSampleRate ? 0.96218872f : 0.94369507f;
cmd.depop_buffer =
memory_pool->Translate(CpuAddr(depop_buffer.data()), mix_info.buffer_count * sizeof(s32));
GenerateEnd<DepopForMixBuffersCommand>(cmd);
}
void CommandBuffer::GenerateDelayCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 buffer_offset) {
auto& cmd{GenerateStart<DelayCommand, CommandId::Delay>(node_id)};
const auto& parameter{
*reinterpret_cast<DelayInfo::ParameterVersion1*>(effect_info.GetParameter())};
const auto state{effect_info.GetStateBuffer()};
if (IsChannelCountValid(parameter.channel_count)) {
const auto state_buffer{memory_pool->Translate(CpuAddr(state), sizeof(DelayInfo::State))};
if (state_buffer) {
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
cmd.inputs[channel] = static_cast<s8>(buffer_offset + parameter.inputs[channel]);
cmd.outputs[channel] = static_cast<s8>(buffer_offset + parameter.outputs[channel]);
}
cmd.parameter = parameter;
cmd.effect_enabled = effect_info.IsEnabled();
cmd.state = state_buffer;
cmd.workbuffer = effect_info.GetWorkbuffer(-1);
}
}
GenerateEnd<DelayCommand>(cmd);
}
void CommandBuffer::GenerateUpsampleCommand(const s32 node_id, const s16 buffer_offset,
UpsamplerInfo& upsampler_info, const u32 input_count,
std::span<const s8> inputs, const s16 buffer_count,
const u32 sample_count_, const u32 sample_rate_) {
auto& cmd{GenerateStart<UpsampleCommand, CommandId::Upsample>(node_id)};
cmd.samples_buffer = memory_pool->Translate(upsampler_info.samples_pos,
upsampler_info.sample_count * sizeof(s32));
cmd.inputs = memory_pool->Translate(CpuAddr(upsampler_info.inputs.data()), MaxChannels);
cmd.buffer_count = buffer_count;
cmd.unk_20 = 0;
cmd.source_sample_count = sample_count_;
cmd.source_sample_rate = sample_rate_;
upsampler_info.input_count = input_count;
for (u32 i = 0; i < input_count; i++) {
upsampler_info.inputs[i] = buffer_offset + inputs[i];
}
cmd.upsampler_info = memory_pool->Translate(CpuAddr(&upsampler_info), sizeof(UpsamplerInfo));
GenerateEnd<UpsampleCommand>(cmd);
}
void CommandBuffer::GenerateDownMix6chTo2chCommand(const s32 node_id, std::span<const s8> inputs,
const s16 buffer_offset,
std::span<const f32> downmix_coeff) {
auto& cmd{GenerateStart<DownMix6chTo2chCommand, CommandId::DownMix6chTo2ch>(node_id)};
for (u32 i = 0; i < MaxChannels; i++) {
cmd.inputs[i] = buffer_offset + inputs[i];
cmd.outputs[i] = buffer_offset + inputs[i];
}
for (u32 i = 0; i < 4; i++) {
cmd.down_mix_coeff[i] = downmix_coeff[i];
}
GenerateEnd<DownMix6chTo2chCommand>(cmd);
}
void CommandBuffer::GenerateAuxCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 input_index, const s16 output_index,
const s16 buffer_offset, const u32 update_count,
const u32 count_max, const u32 write_offset) {
auto& cmd{GenerateStart<AuxCommand, CommandId::Aux>(node_id)};
if (effect_info.GetSendBuffer() != 0 && effect_info.GetReturnBuffer() != 0) {
cmd.input = buffer_offset + input_index;
cmd.output = buffer_offset + output_index;
cmd.send_buffer_info = effect_info.GetSendBufferInfo();
cmd.send_buffer = effect_info.GetSendBuffer();
cmd.return_buffer_info = effect_info.GetReturnBufferInfo();
cmd.return_buffer = effect_info.GetReturnBuffer();
cmd.count_max = count_max;
cmd.write_offset = write_offset;
cmd.update_count = update_count;
cmd.effect_enabled = effect_info.IsEnabled();
}
GenerateEnd<AuxCommand>(cmd);
}
void CommandBuffer::GenerateDeviceSinkCommand(const s32 node_id, const s16 buffer_offset,
SinkInfoBase& sink_info, const u32 session_id,
std::span<s32> samples_buffer) {
auto& cmd{GenerateStart<DeviceSinkCommand, CommandId::DeviceSink>(node_id)};
const auto& parameter{
*reinterpret_cast<DeviceSinkInfo::DeviceInParameter*>(sink_info.GetParameter())};
auto state{*reinterpret_cast<DeviceSinkInfo::DeviceState*>(sink_info.GetState())};
cmd.session_id = session_id;
if (state.upsampler_info != nullptr) {
const auto size_{state.upsampler_info->sample_count * parameter.input_count};
const auto size_bytes{size_ * sizeof(s32)};
const auto addr{memory_pool->Translate(state.upsampler_info->samples_pos, size_bytes)};
cmd.sample_buffer = {reinterpret_cast<s32*>(addr),
parameter.input_count * state.upsampler_info->sample_count};
} else {
cmd.sample_buffer = samples_buffer;
}
cmd.input_count = parameter.input_count;
for (u32 i = 0; i < parameter.input_count; i++) {
cmd.inputs[i] = buffer_offset + parameter.inputs[i];
}
GenerateEnd<DeviceSinkCommand>(cmd);
}
void CommandBuffer::GenerateCircularBufferSinkCommand(const s32 node_id, SinkInfoBase& sink_info,
const s16 buffer_offset) {
auto& cmd{GenerateStart<CircularBufferSinkCommand, CommandId::CircularBufferSink>(node_id)};
const auto& parameter{*reinterpret_cast<CircularBufferSinkInfo::CircularBufferInParameter*>(
sink_info.GetParameter())};
auto state{
*reinterpret_cast<CircularBufferSinkInfo::CircularBufferState*>(sink_info.GetState())};
cmd.input_count = parameter.input_count;
for (u32 i = 0; i < parameter.input_count; i++) {
cmd.inputs[i] = buffer_offset + parameter.inputs[i];
}
cmd.address = state.address_info.GetReference(true);
cmd.size = parameter.size;
cmd.pos = state.current_pos;
GenerateEnd<CircularBufferSinkCommand>(cmd);
}
void CommandBuffer::GenerateReverbCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 buffer_offset,
const bool long_size_pre_delay_supported) {
auto& cmd{GenerateStart<ReverbCommand, CommandId::Reverb>(node_id)};
const auto& parameter{
*reinterpret_cast<ReverbInfo::ParameterVersion2*>(effect_info.GetParameter())};
const auto state{effect_info.GetStateBuffer()};
if (IsChannelCountValid(parameter.channel_count)) {
const auto state_buffer{memory_pool->Translate(CpuAddr(state), sizeof(ReverbInfo::State))};
if (state_buffer) {
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
cmd.inputs[channel] = buffer_offset + parameter.inputs[channel];
cmd.outputs[channel] = buffer_offset + parameter.outputs[channel];
}
cmd.parameter = parameter;
cmd.effect_enabled = effect_info.IsEnabled();
cmd.state = state_buffer;
cmd.workbuffer = effect_info.GetWorkbuffer(-1);
cmd.long_size_pre_delay_supported = long_size_pre_delay_supported;
}
}
GenerateEnd<ReverbCommand>(cmd);
}
void CommandBuffer::GenerateI3dl2ReverbCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 buffer_offset) {
auto& cmd{GenerateStart<I3dl2ReverbCommand, CommandId::I3dl2Reverb>(node_id)};
const auto& parameter{
*reinterpret_cast<I3dl2ReverbInfo::ParameterVersion1*>(effect_info.GetParameter())};
const auto state{effect_info.GetStateBuffer()};
if (IsChannelCountValid(parameter.channel_count)) {
const auto state_buffer{
memory_pool->Translate(CpuAddr(state), sizeof(I3dl2ReverbInfo::State))};
if (state_buffer) {
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
cmd.inputs[channel] = buffer_offset + parameter.inputs[channel];
cmd.outputs[channel] = buffer_offset + parameter.outputs[channel];
}
cmd.parameter = parameter;
cmd.effect_enabled = effect_info.IsEnabled();
cmd.state = state_buffer;
cmd.workbuffer = effect_info.GetWorkbuffer(-1);
}
}
GenerateEnd<I3dl2ReverbCommand>(cmd);
}
void CommandBuffer::GeneratePerformanceCommand(const s32 node_id, const PerformanceState state,
const PerformanceEntryAddresses& entry_addresses) {
auto& cmd{GenerateStart<PerformanceCommand, CommandId::Performance>(node_id)};
cmd.state = state;
cmd.entry_address = entry_addresses;
GenerateEnd<PerformanceCommand>(cmd);
}
void CommandBuffer::GenerateClearMixCommand(const s32 node_id) {
auto& cmd{GenerateStart<ClearMixBufferCommand, CommandId::ClearMixBuffer>(node_id)};
GenerateEnd<ClearMixBufferCommand>(cmd);
}
void CommandBuffer::GenerateCopyMixBufferCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 buffer_offset, const s8 channel) {
auto& cmd{GenerateStart<CopyMixBufferCommand, CommandId::CopyMixBuffer>(node_id)};
const auto& parameter{
*reinterpret_cast<BiquadFilterInfo::ParameterVersion1*>(effect_info.GetParameter())};
cmd.input_index = buffer_offset + parameter.inputs[channel];
cmd.output_index = buffer_offset + parameter.outputs[channel];
GenerateEnd<CopyMixBufferCommand>(cmd);
}
void CommandBuffer::GenerateLightLimiterCommand(
const s32 node_id, const s16 buffer_offset,
const LightLimiterInfo::ParameterVersion1& parameter, const LightLimiterInfo::State& state,
const bool enabled, const CpuAddr workbuffer) {
auto& cmd{GenerateStart<LightLimiterVersion1Command, CommandId::LightLimiterVersion1>(node_id)};
if (IsChannelCountValid(parameter.channel_count)) {
const auto state_buffer{
memory_pool->Translate(CpuAddr(&state), sizeof(LightLimiterInfo::State))};
if (state_buffer) {
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
cmd.inputs[channel] = buffer_offset + parameter.inputs[channel];
cmd.outputs[channel] = buffer_offset + parameter.outputs[channel];
}
std::memcpy(&cmd.parameter, &parameter, sizeof(LightLimiterInfo::ParameterVersion1));
cmd.effect_enabled = enabled;
cmd.state = state_buffer;
cmd.workbuffer = workbuffer;
}
}
GenerateEnd<LightLimiterVersion1Command>(cmd);
}
void CommandBuffer::GenerateLightLimiterCommand(
const s32 node_id, const s16 buffer_offset,
const LightLimiterInfo::ParameterVersion2& parameter,
const LightLimiterInfo::StatisticsInternal& statistics, const LightLimiterInfo::State& state,
const bool enabled, const CpuAddr workbuffer) {
auto& cmd{GenerateStart<LightLimiterVersion2Command, CommandId::LightLimiterVersion2>(node_id)};
if (IsChannelCountValid(parameter.channel_count)) {
const auto state_buffer{
memory_pool->Translate(CpuAddr(&state), sizeof(LightLimiterInfo::State))};
if (state_buffer) {
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
cmd.inputs[channel] = buffer_offset + parameter.inputs[channel];
cmd.outputs[channel] = buffer_offset + parameter.outputs[channel];
}
cmd.parameter = parameter;
cmd.effect_enabled = enabled;
cmd.state = state_buffer;
if (cmd.parameter.statistics_enabled) {
cmd.result_state = memory_pool->Translate(
CpuAddr(&statistics), sizeof(LightLimiterInfo::StatisticsInternal));
} else {
cmd.result_state = 0;
}
cmd.workbuffer = workbuffer;
}
}
GenerateEnd<LightLimiterVersion2Command>(cmd);
}
void CommandBuffer::GenerateMultitapBiquadFilterCommand(const s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel) {
auto& cmd{GenerateStart<MultiTapBiquadFilterCommand, CommandId::MultiTapBiquadFilter>(node_id)};
cmd.input = buffer_count + channel;
cmd.output = buffer_count + channel;
cmd.biquads = voice_info.biquads;
cmd.states[0] =
memory_pool->Translate(CpuAddr(voice_state.biquad_states[0].data()),
MaxBiquadFilters * sizeof(VoiceState::BiquadFilterState));
cmd.states[1] =
memory_pool->Translate(CpuAddr(voice_state.biquad_states[1].data()),
MaxBiquadFilters * sizeof(VoiceState::BiquadFilterState));
cmd.needs_init[0] = !voice_info.biquad_initialized[0];
cmd.needs_init[1] = !voice_info.biquad_initialized[1];
cmd.filter_tap_count = MaxBiquadFilters;
GenerateEnd<MultiTapBiquadFilterCommand>(cmd);
}
void CommandBuffer::GenerateCaptureCommand(const s32 node_id, EffectInfoBase& effect_info,
const s16 input_index, const s16 output_index,
const s16 buffer_offset, const u32 update_count,
const u32 count_max, const u32 write_offset) {
auto& cmd{GenerateStart<CaptureCommand, CommandId::Capture>(node_id)};
if (effect_info.GetSendBuffer()) {
cmd.input = buffer_offset + input_index;
cmd.output = buffer_offset + output_index;
cmd.send_buffer_info = effect_info.GetSendBufferInfo();
cmd.send_buffer = effect_info.GetSendBuffer();
cmd.count_max = count_max;
cmd.write_offset = write_offset;
cmd.update_count = update_count;
cmd.effect_enabled = effect_info.IsEnabled();
}
GenerateEnd<CaptureCommand>(cmd);
}
} // namespace AudioCore::AudioRenderer

View file

@@ -0,0 +1,457 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/renderer/command/commands.h"
#include "audio_core/renderer/effect/effect_light_limiter_info.h"
#include "audio_core/renderer/performance/performance_manager.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
struct UpsamplerInfo;
struct VoiceState;
class EffectInfoBase;
class ICommandProcessingTimeEstimator;
class MixInfo;
class MemoryPoolInfo;
class SinkInfoBase;
class VoiceInfo;
/**
* Utility functions to generate and add commands into the current command list.
*/
class CommandBuffer {
public:
/**
* Generate a PCM s16 version 1 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param memory_pool - Memory pool for translating buffer addresses to the DSP.
* @param voice_info - The voice info this command is generated from.
* @param voice_state - The voice state the DSP will use for this command.
* @param buffer_count - Number of mix buffers in use,
* data will be read into this index + channel.
* @param channel - Channel index for this command.
*/
void GeneratePcmInt16Version1Command(s32 node_id, const MemoryPoolInfo& memory_pool,
VoiceInfo& voice_info, const VoiceState& voice_state,
s16 buffer_count, s8 channel);
/**
* Generate a PCM s16 version 2 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_info - The voice info this command is generated from.
* @param voice_state - The voice state the DSP will use for this command.
* @param buffer_count - Number of mix buffers in use,
* data will be read into this index + channel.
* @param channel - Channel index for this command.
*/
void GeneratePcmInt16Version2Command(s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state, s16 buffer_count,
s8 channel);
/**
* Generate a PCM f32 version 1 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param memory_pool - Memory pool for translating buffer addresses to the DSP.
* @param voice_info - The voice info this command is generated from.
* @param voice_state - The voice state the DSP will use for this command.
* @param buffer_count - Number of mix buffers in use,
* data will be read into this index + channel.
* @param channel - Channel index for this command.
*/
void GeneratePcmFloatVersion1Command(s32 node_id, const MemoryPoolInfo& memory_pool,
VoiceInfo& voice_info, const VoiceState& voice_state,
s16 buffer_count, s8 channel);
/**
* Generate a PCM f32 version 2 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_info - The voice info this command is generated from.
* @param voice_state - The voice state the DSP will use for this command.
* @param buffer_count - Number of mix buffers in use,
* data will be read into this index + channel.
* @param channel - Channel index for this command.
*/
void GeneratePcmFloatVersion2Command(s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state, s16 buffer_count,
s8 channel);
/**
* Generate an ADPCM version 1 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param memory_pool - Memory pool for translating buffer addresses to the DSP.
* @param voice_info - The voice info this command is generated from.
* @param voice_state - The voice state the DSP will use for this command.
* @param buffer_count - Number of mix buffers in use,
* data will be read into this index + channel.
* @param channel - Channel index for this command.
*/
void GenerateAdpcmVersion1Command(s32 node_id, const MemoryPoolInfo& memory_pool,
VoiceInfo& voice_info, const VoiceState& voice_state,
s16 buffer_count, s8 channel);
/**
* Generate an ADPCM version 2 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_info - The voice info this command is generated from.
* @param voice_state - The voice state the DSP will use for this command.
* @param buffer_count - Number of mix buffers in use,
* data will be read into this index + channel.
* @param channel - Channel index for this command.
*/
void GenerateAdpcmVersion2Command(s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state, s16 buffer_count, s8 channel);
/**
* Generate a volume command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_offset - Base mix buffer index to generate this command at.
* @param input_index - Channel index and mix buffer offset for this command.
* @param volume - Mix volume added to the input samples.
* @param precision - Number of decimal bits for fixed point operations.
*/
void GenerateVolumeCommand(s32 node_id, s16 buffer_offset, s16 input_index, f32 volume,
u8 precision);
/**
* Generate a volume ramp command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_info - The voice info this command takes its volumes from.
* @param buffer_count - Number of active mix buffers, command will generate at this index.
* @param precision - Number of decimal bits for fixed point operations.
*/
void GenerateVolumeRampCommand(s32 node_id, VoiceInfo& voice_info, s16 buffer_count,
u8 precision);
/**
* Generate a biquad filter command from a voice, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_info - The voice info this command takes biquad parameters from.
* @param voice_state - Used by the AudioRenderer to track previous samples.
* @param buffer_count - Number of active mix buffers,
* command will generate at this index + channel.
* @param channel - Channel index for this filter to work on.
* @param biquad_index - Which biquad filter to use for this command (0-1).
* @param use_float_processing - Should int or float processing be used?
*/
void GenerateBiquadFilterCommand(s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state, s16 buffer_count, s8 channel,
u32 biquad_index, bool use_float_processing);
/**
* Generate a biquad filter effect command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - The effect info this command takes biquad parameters from.
* @param buffer_offset - Mix buffer offset this command will use,
* command will generate at this index + channel.
* @param channel - Channel index for this filter to work on.
* @param needs_init - True if the biquad state needs initialisation.
* @param use_float_processing - Should int or float processing be used?
*/
void GenerateBiquadFilterCommand(s32 node_id, EffectInfoBase& effect_info, s16 buffer_offset,
s8 channel, bool needs_init, bool use_float_processing);
/**
* Generate a mix command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param input_index - Input mix buffer index for this command.
* Added to the buffer offset.
* @param output_index - Output mix buffer index for this command.
* Added to the buffer offset.
* @param buffer_offset - Mix buffer offset this command will use.
* @param volume - Volume to be applied to the input.
* @param precision - Number of decimal bits for fixed point operations.
*/
void GenerateMixCommand(s32 node_id, s16 input_index, s16 output_index, s16 buffer_offset,
f32 volume, u8 precision);
/**
* Generate a mix ramp command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_count - Number of active mix buffers.
* @param input_index - Input mix buffer index for this command.
* Added to buffer_count.
* @param output_index - Output mix buffer index for this command.
* Added to buffer_count.
* @param volume - Current mix volume used for calculating the ramp.
* @param prev_volume - Previous mix volume, used for calculating the ramp,
* also applied to the input.
* @param precision - Number of decimal bits for fixed point operations.
*/
void GenerateMixRampCommand(s32 node_id, s16 buffer_count, s16 input_index, s16 output_index,
f32 volume, f32 prev_volume, CpuAddr prev_samples, u8 precision);
/**
* Generate a mix ramp grouped command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_count - Number of active mix buffers.
* @param input_index - Input mix buffer index for this command.
* Added to buffer_count.
* @param output_index - Output mix buffer index for this command.
* Added to buffer_count.
* @param volumes - Current mix volumes used for calculating the ramp.
* @param prev_volumes - Previous mix volumes, used for calculating the ramp,
* also applied to the input.
* @param precision - Number of decimal bits for fixed point operations.
*/
void GenerateMixRampGroupedCommand(s32 node_id, s16 buffer_count, s16 input_index,
s16 output_index, std::span<const f32> volumes,
std::span<const f32> prev_volumes, CpuAddr prev_samples,
u8 precision);
/**
* Generate a depop prepare command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_state - State to track the previous depop samples for each mix buffer.
* @param buffer - State to track the current depop samples for each mix buffer.
* @param buffer_count - Number of active mix buffers.
* @param buffer_offset - Base mix buffer index to generate the channel depops at.
* @param was_playing - Command only needs to work if the voice was previously playing.
*/
void GenerateDepopPrepareCommand(s32 node_id, const VoiceState& voice_state,
std::span<const s32> buffer, s16 buffer_count,
s16 buffer_offset, bool was_playing);
/**
* Generate a depop command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param mix_info - Mix info to get the buffer count and base offsets from.
* @param depop_buffer - Buffer of current depop sample values to be added to the input
* channels.
*/
void GenerateDepopForMixBuffersCommand(s32 node_id, const MixInfo& mix_info,
std::span<const s32> depop_buffer);
/**
* Generate a delay command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - Delay effect info to generate this command from.
     * @param buffer_offset - Base mix buffer offset to apply the delay at.
*/
void GenerateDelayCommand(s32 node_id, EffectInfoBase& effect_info, s16 buffer_offset);
/**
* Generate an upsample command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_offset - Base mix buffer offset to upsample.
* @param upsampler_info - Upsampler info to control the upsampling.
* @param input_count - Number of input channels to upsample.
* @param inputs - Input mix buffer indexes.
* @param buffer_count - Number of active mix buffers.
* @param sample_count - Source sample count of the input.
* @param sample_rate - Source sample rate of the input.
*/
void GenerateUpsampleCommand(s32 node_id, s16 buffer_offset, UpsamplerInfo& upsampler_info,
u32 input_count, std::span<const s8> inputs, s16 buffer_count,
u32 sample_count, u32 sample_rate);
/**
* Generate a downmix 6 -> 2 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param inputs - Input mix buffer indexes.
* @param buffer_offset - Base mix buffer offset of the channels to downmix.
* @param downmix_coeff - Downmixing coefficients.
*/
void GenerateDownMix6chTo2chCommand(s32 node_id, std::span<const s8> inputs, s16 buffer_offset,
std::span<const f32> downmix_coeff);
/**
* Generate an aux buffer command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - Aux effect info to generate this command from.
* @param input_index - Input mix buffer index for this command.
* Added to buffer_offset.
* @param output_index - Output mix buffer index for this command.
* Added to buffer_offset.
* @param buffer_offset - Base mix buffer offset to use.
* @param update_count - Number of samples to write back to the game as updated, can be 0.
* @param count_max - Maximum number of samples to read or write.
* @param write_offset - Current read or write offset within the buffer.
*/
void GenerateAuxCommand(s32 node_id, EffectInfoBase& effect_info, s16 input_index,
s16 output_index, s16 buffer_offset, u32 update_count, u32 count_max,
u32 write_offset);
/**
* Generate a device sink command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_offset - Base mix buffer offset to use.
* @param sink_info - The sink_info to generate this command from.
     * @param session_id - System session id this command is generated from.
     * @param samples_buffer - The buffer to be sent to the sink if upsampling is not used.
*/
void GenerateDeviceSinkCommand(s32 node_id, s16 buffer_offset, SinkInfoBase& sink_info,
u32 session_id, std::span<s32> samples_buffer);
/**
* Generate a circular buffer sink command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param sink_info - The sink_info to generate this command from.
* @param buffer_offset - Base mix buffer offset to use.
*/
void GenerateCircularBufferSinkCommand(s32 node_id, SinkInfoBase& sink_info, s16 buffer_offset);
/**
* Generate a reverb command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - Reverb effect info to generate this command from.
* @param buffer_offset - Base mix buffer offset to use.
* @param long_size_pre_delay_supported - Should a longer pre-delay time be used before reverb
* begins?
*/
void GenerateReverbCommand(s32 node_id, EffectInfoBase& effect_info, s16 buffer_offset,
bool long_size_pre_delay_supported);
/**
* Generate an I3DL2 reverb command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - I3DL2Reverb effect info to generate this command from.
* @param buffer_offset - Base mix buffer offset to use.
*/
void GenerateI3dl2ReverbCommand(s32 node_id, EffectInfoBase& effect_info, s16 buffer_offset);
/**
* Generate a performance command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param state - State of the performance.
* @param entry_addresses - The addresses to be filled in by the AudioRenderer.
*/
void GeneratePerformanceCommand(s32 node_id, PerformanceState state,
const PerformanceEntryAddresses& entry_addresses);
/**
* Generate a clear mix command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
*/
void GenerateClearMixCommand(s32 node_id);
/**
* Generate a copy mix command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - BiquadFilter effect info to generate this command from.
* @param buffer_offset - Base mix buffer offset to use.
* @param channel - Index to the effect's parameters input indexes for this command.
*/
void GenerateCopyMixBufferCommand(s32 node_id, EffectInfoBase& effect_info, s16 buffer_offset,
s8 channel);
/**
* Generate a light limiter version 1 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_offset - Base mix buffer offset to use.
* @param parameter - Effect parameter to generate from.
* @param state - State used by the AudioRenderer between commands.
* @param enabled - Is this command enabled?
* @param workbuffer - Game-supplied memory for the state.
*/
void GenerateLightLimiterCommand(s32 node_id, s16 buffer_offset,
const LightLimiterInfo::ParameterVersion1& parameter,
const LightLimiterInfo::State& state, bool enabled,
CpuAddr workbuffer);
/**
* Generate a light limiter version 2 command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param buffer_offset - Base mix buffer offset to use.
* @param parameter - Effect parameter to generate from.
* @param statistics - Statistics reported by the AudioRenderer on the limiter's state.
* @param state - State used by the AudioRenderer between commands.
* @param enabled - Is this command enabled?
* @param workbuffer - Game-supplied memory for the state.
*/
void GenerateLightLimiterCommand(s32 node_id, s16 buffer_offset,
const LightLimiterInfo::ParameterVersion2& parameter,
const LightLimiterInfo::StatisticsInternal& statistics,
const LightLimiterInfo::State& state, bool enabled,
CpuAddr workbuffer);
/**
* Generate a multitap biquad filter command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param voice_info - The voice info this command takes biquad parameters from.
* @param voice_state - Used by the AudioRenderer to track previous samples.
* @param buffer_count - Number of active mix buffers,
* command will generate at this index + channel.
* @param channel - Channel index for this filter to work on.
*/
void GenerateMultitapBiquadFilterCommand(s32 node_id, VoiceInfo& voice_info,
const VoiceState& voice_state, s16 buffer_count,
s8 channel);
/**
* Generate a capture command, adding it to the command list.
*
* @param node_id - Node id of the voice this command is generated for.
* @param effect_info - Capture effect info to generate this command from.
* @param input_index - Input mix buffer index for this command.
* Added to buffer_offset.
* @param output_index - Output mix buffer index for this command (unused).
* Added to buffer_offset.
* @param buffer_offset - Base mix buffer offset to use.
* @param update_count - Number of samples to write back to the game as updated, can be 0.
* @param count_max - Maximum number of samples to read or write.
* @param write_offset - Current read or write offset within the buffer.
*/
void GenerateCaptureCommand(s32 node_id, EffectInfoBase& effect_info, s16 input_index,
s16 output_index, s16 buffer_offset, u32 update_count,
u32 count_max, u32 write_offset);
/// Command list buffer generated commands will be added to
std::span<u8> command_list{};
/// Input sample count, unused
u32 sample_count{};
/// Input sample rate, unused
u32 sample_rate{};
/// Current size of the command buffer
u64 size{};
/// Current number of commands added
u32 count{};
/// Current estimated processing time for all commands
u32 estimated_process_time{};
/// Used for mapping buffers for the AudioRenderer
MemoryPoolInfo* memory_pool{};
/// Used for estimating command process times
ICommandProcessingTimeEstimator* time_estimator{};
/// Used to check which rendering features are currently enabled
BehaviorInfo* behavior{};
private:
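    // Note (assumption, not from the original source): GenerateStart is expected to place a
    // new command of type T at the current end of command_list and fill in its header with
    // Id and node_id; GenerateEnd is expected to finalize it, bumping size, count and
    // estimated_process_time via time_estimator.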
template <typename T, CommandId Id>
T& GenerateStart(const s32 node_id);
template <typename T>
void GenerateEnd(T& cmd);
};
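// Illustrative usage sketch (not part of the original source): a caller is expected to point
// command_list at a pre-allocated workbuffer and wire up the pools and estimators before
// generating commands, roughly:
//
//     CommandBuffer command_buffer{};
//     command_buffer.command_list = workbuffer_span;  // hypothetical span over renderer memory
//     command_buffer.memory_pool = &memory_pool_info; // hypothetical MemoryPoolInfo
//     command_buffer.time_estimator = &estimator;     // hypothetical estimator instance
//     command_buffer.behavior = &behavior_info;       // hypothetical BehaviorInfo
//     command_buffer.GenerateClearMixCommand(InvalidNodeId);
//     command_buffer.GenerateVolumeCommand(node_id, buffer_offset, input_index, 1.0f, 15);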
} // namespace AudioCore::AudioRenderer

View file

@@ -0,0 +1,783 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/common/audio_renderer_parameter.h"
#include "audio_core/renderer/behavior/behavior_info.h"
#include "audio_core/renderer/command/command_buffer.h"
#include "audio_core/renderer/command/command_generator.h"
#include "audio_core/renderer/command/command_list_header.h"
#include "audio_core/renderer/effect/effect_aux_info.h"
#include "audio_core/renderer/effect/effect_biquad_filter_info.h"
#include "audio_core/renderer/effect/effect_buffer_mixer_info.h"
#include "audio_core/renderer/effect/effect_capture_info.h"
#include "audio_core/renderer/effect/effect_context.h"
#include "audio_core/renderer/mix/mix_context.h"
#include "audio_core/renderer/performance/detail_aspect.h"
#include "audio_core/renderer/performance/entry_aspect.h"
#include "audio_core/renderer/sink/device_sink_info.h"
#include "audio_core/renderer/sink/sink_context.h"
#include "audio_core/renderer/splitter/splitter_context.h"
#include "audio_core/renderer/voice/voice_context.h"
#include "common/alignment.h"
namespace AudioCore::AudioRenderer {
CommandGenerator::CommandGenerator(CommandBuffer& command_buffer_,
const CommandListHeader& command_list_header_,
const AudioRendererSystemContext& render_context_,
VoiceContext& voice_context_, MixContext& mix_context_,
EffectContext& effect_context_, SinkContext& sink_context_,
SplitterContext& splitter_context_,
PerformanceManager* performance_manager_)
: command_buffer{command_buffer_}, command_header{command_list_header_},
render_context{render_context_}, voice_context{voice_context_}, mix_context{mix_context_},
effect_context{effect_context_}, sink_context{sink_context_},
splitter_context{splitter_context_}, performance_manager{performance_manager_} {
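    // Every command list begins with a clear-mix command so all mix buffers start silent.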
command_buffer.GenerateClearMixCommand(InvalidNodeId);
}
void CommandGenerator::GenerateDataSourceCommand(VoiceInfo& voice_info,
const VoiceState& voice_state, const s8 channel) {
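    // A voice feeding a splitter needs one depop-prepare per configured destination mix,
    // while a voice routed directly to a mix only needs a single depop-prepare for that mix.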
if (voice_info.mix_id == UnusedMixId) {
if (voice_info.splitter_id != UnusedSplitterId) {
auto destination{splitter_context.GetDesintationData(voice_info.splitter_id, 0)};
u32 dest_id{0};
while (destination != nullptr) {
if (destination->IsConfigured()) {
auto mix_id{destination->GetMixId()};
if (mix_id < mix_context.GetCount()) {
auto mix_info{mix_context.GetInfo(mix_id)};
command_buffer.GenerateDepopPrepareCommand(
voice_info.node_id, voice_state, render_context.depop_buffer,
mix_info->buffer_count, mix_info->buffer_offset,
voice_info.was_playing);
}
}
dest_id++;
destination = splitter_context.GetDesintationData(voice_info.splitter_id, dest_id);
}
}
} else {
auto mix_info{mix_context.GetInfo(voice_info.mix_id)};
command_buffer.GenerateDepopPrepareCommand(
voice_info.node_id, voice_state, render_context.depop_buffer, mix_info->buffer_count,
mix_info->buffer_offset, voice_info.was_playing);
}
if (voice_info.was_playing) {
return;
}
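    // Wavebuffer version 2 support selects the version 2 data source commands (which carry
    // channel index/count and use the command buffer's own memory pool); otherwise the
    // version 1 variants are generated with an explicitly supplied memory pool.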
if (render_context.behavior->IsWaveBufferVer2Supported()) {
switch (voice_info.sample_format) {
case SampleFormat::PcmInt16:
command_buffer.GeneratePcmInt16Version2Command(
voice_info.node_id, voice_info, voice_state, render_context.mix_buffer_count,
channel);
break;
case SampleFormat::PcmFloat:
command_buffer.GeneratePcmFloatVersion2Command(
voice_info.node_id, voice_info, voice_state, render_context.mix_buffer_count,
channel);
break;
case SampleFormat::Adpcm:
command_buffer.GenerateAdpcmVersion2Command(voice_info.node_id, voice_info, voice_state,
render_context.mix_buffer_count, channel);
break;
default:
LOG_ERROR(Service_Audio, "Invalid SampleFormat {}",
static_cast<u32>(voice_info.sample_format));
break;
}
} else {
switch (voice_info.sample_format) {
case SampleFormat::PcmInt16:
command_buffer.GeneratePcmInt16Version1Command(
voice_info.node_id, *command_buffer.memory_pool, voice_info, voice_state,
render_context.mix_buffer_count, channel);
break;
case SampleFormat::PcmFloat:
command_buffer.GeneratePcmFloatVersion1Command(
voice_info.node_id, *command_buffer.memory_pool, voice_info, voice_state,
render_context.mix_buffer_count, channel);
break;
case SampleFormat::Adpcm:
command_buffer.GenerateAdpcmVersion1Command(
voice_info.node_id, *command_buffer.memory_pool, voice_info, voice_state,
render_context.mix_buffer_count, channel);
break;
default:
LOG_ERROR(Service_Audio, "Invalid SampleFormat {}",
static_cast<u32>(voice_info.sample_format));
break;
}
}
}
void CommandGenerator::GenerateVoiceMixCommand(std::span<const f32> mix_volumes,
std::span<const f32> prev_mix_volumes,
const VoiceState& voice_state, s16 output_index,
const s16 buffer_count, const s16 input_index,
const s32 node_id) {
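    // precision is the number of fractional bits for the fixed-point volume math:
    // Q15 by default, Q23 when the behavior flag reports support for the higher precision.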
u8 precision{15};
if (render_context.behavior->IsVolumeMixParameterPrecisionQ23Supported()) {
precision = 23;
}
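    // With more than 8 destination buffers a single grouped ramp command covers them all;
    // otherwise one mix-ramp command is generated per destination buffer.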
if (buffer_count > 8) {
const auto prev_samples{render_context.memory_pool_info->Translate(
CpuAddr(voice_state.previous_samples.data()), buffer_count * sizeof(s32))};
command_buffer.GenerateMixRampGroupedCommand(node_id, buffer_count, input_index,
output_index, mix_volumes, prev_mix_volumes,
prev_samples, precision);
} else {
for (s16 i = 0; i < buffer_count; i++, output_index++) {
const auto prev_samples{render_context.memory_pool_info->Translate(
CpuAddr(&voice_state.previous_samples[i]), sizeof(s32))};
command_buffer.GenerateMixRampCommand(node_id, buffer_count, input_index, output_index,
mix_volumes[i], prev_mix_volumes[i], prev_samples,
precision);
}
}
}
void CommandGenerator::GenerateBiquadFilterCommandForVoice(VoiceInfo& voice_info,
const VoiceState& voice_state,
const s16 buffer_count, const s8 channel,
const s32 node_id) {
const bool both_biquads_enabled{voice_info.biquads[0].enabled && voice_info.biquads[1].enabled};
const auto use_float_processing{render_context.behavior->UseBiquadFilterFloatProcessing()};
if (both_biquads_enabled && render_context.behavior->UseMultiTapBiquadFilterProcessing() &&
use_float_processing) {
command_buffer.GenerateMultitapBiquadFilterCommand(node_id, voice_info, voice_state,
buffer_count, channel);
} else {
for (u32 i = 0; i < MaxBiquadFilters; i++) {
if (voice_info.biquads[i].enabled) {
command_buffer.GenerateBiquadFilterCommand(node_id, voice_info, voice_state,
buffer_count, channel, i,
use_float_processing);
}
}
}
}
void CommandGenerator::GenerateVoiceCommand(VoiceInfo& voice_info) {
u8 precision{15};
if (render_context.behavior->IsVolumeMixParameterPrecisionQ23Supported()) {
precision = 23;
}
for (s8 channel = 0; channel < voice_info.channel_count; channel++) {
const auto resource_id{voice_info.channel_resource_ids[channel]};
auto& voice_state{voice_context.GetDspSharedState(resource_id)};
auto& channel_resource{voice_context.GetChannelResource(resource_id)};
PerformanceDetailType detail_type{PerformanceDetailType::Invalid};
switch (voice_info.sample_format) {
case SampleFormat::PcmInt16:
detail_type = PerformanceDetailType::Unk1;
break;
case SampleFormat::PcmFloat:
detail_type = PerformanceDetailType::Unk10;
break;
default:
detail_type = PerformanceDetailType::Unk2;
break;
}
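        // DetailAspect/EntryAspect appear to be RAII-style helpers: when performance profiling
        // is active they emit a Start entry on construction (setting `initialized`), and the
        // matching Stop command is generated explicitly afterwards (assumption from usage).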
DetailAspect data_source_detail(*this, PerformanceEntryType::Voice, voice_info.node_id,
detail_type);
GenerateDataSourceCommand(voice_info, voice_state, channel);
if (data_source_detail.initialized) {
command_buffer.GeneratePerformanceCommand(data_source_detail.node_id,
PerformanceState::Stop,
data_source_detail.performance_entry_address);
}
if (voice_info.was_playing) {
voice_info.prev_volume = 0.0f;
continue;
}
if (!voice_info.HasAnyConnection()) {
continue;
}
DetailAspect biquad_detail_aspect(*this, PerformanceEntryType::Voice, voice_info.node_id,
PerformanceDetailType::Unk4);
GenerateBiquadFilterCommandForVoice(
voice_info, voice_state, render_context.mix_buffer_count, channel, voice_info.node_id);
if (biquad_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
biquad_detail_aspect.node_id, PerformanceState::Stop,
biquad_detail_aspect.performance_entry_address);
}
DetailAspect volume_ramp_detail_aspect(*this, PerformanceEntryType::Voice,
voice_info.node_id, PerformanceDetailType::Unk3);
command_buffer.GenerateVolumeRampCommand(
voice_info.node_id, voice_info, render_context.mix_buffer_count + channel, precision);
if (volume_ramp_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
volume_ramp_detail_aspect.node_id, PerformanceState::Stop,
volume_ramp_detail_aspect.performance_entry_address);
}
voice_info.prev_volume = voice_info.volume;
if (voice_info.mix_id == UnusedMixId) {
if (voice_info.splitter_id != UnusedSplitterId) {
auto i{channel};
auto destination{splitter_context.GetDesintationData(voice_info.splitter_id, i)};
while (destination != nullptr) {
if (destination->IsConfigured()) {
const auto mix_id{destination->GetMixId()};
if (mix_id < mix_context.GetCount() &&
static_cast<s32>(mix_id) != UnusedSplitterId) {
auto mix_info{mix_context.GetInfo(mix_id)};
GenerateVoiceMixCommand(
destination->GetMixVolume(), destination->GetMixVolumePrev(),
voice_state, mix_info->buffer_offset, mix_info->buffer_count,
render_context.mix_buffer_count + channel, voice_info.node_id);
destination->MarkAsNeedToUpdateInternalState();
}
}
i += voice_info.channel_count;
destination = splitter_context.GetDesintationData(voice_info.splitter_id, i);
}
}
} else {
DetailAspect volume_mix_detail_aspect(*this, PerformanceEntryType::Voice,
voice_info.node_id, PerformanceDetailType::Unk3);
auto mix_info{mix_context.GetInfo(voice_info.mix_id)};
GenerateVoiceMixCommand(channel_resource.mix_volumes, channel_resource.prev_mix_volumes,
voice_state, mix_info->buffer_offset, mix_info->buffer_count,
render_context.mix_buffer_count + channel, voice_info.node_id);
if (volume_mix_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
volume_mix_detail_aspect.node_id, PerformanceState::Stop,
volume_mix_detail_aspect.performance_entry_address);
}
channel_resource.prev_mix_volumes = channel_resource.mix_volumes;
}
voice_info.biquad_initialized[0] = voice_info.biquads[0].enabled;
voice_info.biquad_initialized[1] = voice_info.biquads[1].enabled;
}
}
void CommandGenerator::GenerateVoiceCommands() {
const auto voice_count{voice_context.GetCount()};
for (u32 i = 0; i < voice_count; i++) {
auto sorted_info{voice_context.GetSortedInfo(i)};
if (sorted_info->ShouldSkip() || !sorted_info->UpdateForCommandGeneration(voice_context)) {
continue;
}
EntryAspect voice_entry_aspect(*this, PerformanceEntryType::Voice, sorted_info->node_id);
GenerateVoiceCommand(*sorted_info);
if (voice_entry_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(voice_entry_aspect.node_id,
PerformanceState::Stop,
voice_entry_aspect.performance_entry_address);
}
}
splitter_context.UpdateInternalState();
}
void CommandGenerator::GenerateBufferMixerCommand(const s16 buffer_offset,
EffectInfoBase& effect_info, const s32 node_id) {
u8 precision{15};
if (render_context.behavior->IsVolumeMixParameterPrecisionQ23Supported()) {
precision = 23;
}
if (effect_info.IsEnabled()) {
const auto& parameter{
*reinterpret_cast<BufferMixerInfo::ParameterVersion1*>(effect_info.GetParameter())};
for (u32 i = 0; i < parameter.mix_count; i++) {
if (parameter.volumes[i] != 0.0f) {
command_buffer.GenerateMixCommand(node_id, buffer_offset + parameter.inputs[i],
buffer_offset + parameter.outputs[i],
buffer_offset, parameter.volumes[i], precision);
}
}
}
}
void CommandGenerator::GenerateDelayCommand(const s16 buffer_offset, EffectInfoBase& effect_info,
const s32 node_id) {
command_buffer.GenerateDelayCommand(node_id, effect_info, buffer_offset);
}
void CommandGenerator::GenerateReverbCommand(const s16 buffer_offset, EffectInfoBase& effect_info,
const s32 node_id,
const bool long_size_pre_delay_supported) {
command_buffer.GenerateReverbCommand(node_id, effect_info, buffer_offset,
long_size_pre_delay_supported);
}
void CommandGenerator::GenerateI3dl2ReverbEffectCommand(const s16 buffer_offset,
EffectInfoBase& effect_info,
const s32 node_id) {
command_buffer.GenerateI3dl2ReverbCommand(node_id, effect_info, buffer_offset);
}
void CommandGenerator::GenerateAuxCommand(const s16 buffer_offset, EffectInfoBase& effect_info,
const s32 node_id) {
if (effect_info.IsEnabled()) {
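        // Return values are deliberately ignored; fetching the workbuffers here presumably
        // ensures the aux send/return buffers are translated and registered before the
        // commands below reference them (assumption).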
effect_info.GetWorkbuffer(0);
effect_info.GetWorkbuffer(1);
}
if (effect_info.GetSendBuffer() != 0 && effect_info.GetReturnBuffer() != 0) {
const auto& parameter{
*reinterpret_cast<AuxInfo::ParameterVersion1*>(effect_info.GetParameter())};
auto channel_index{parameter.mix_buffer_count - 1};
u32 write_offset{0};
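        // Each channel's aux command advances the ring-buffer write offset by
        // command_header.sample_count; only the last channel (channel_index == 0) reports a
        // non-zero update count back to the game.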
for (u32 i = 0; i < parameter.mix_buffer_count; i++, channel_index--) {
auto new_update_count{command_header.sample_count + write_offset};
const auto update_count{channel_index > 0 ? 0 : new_update_count};
command_buffer.GenerateAuxCommand(node_id, effect_info, parameter.inputs[i],
parameter.outputs[i], buffer_offset, update_count,
parameter.count_max, write_offset);
write_offset = new_update_count;
}
}
}
void CommandGenerator::GenerateBiquadFilterEffectCommand(const s16 buffer_offset,
EffectInfoBase& effect_info,
const s32 node_id) {
const auto& parameter{
*reinterpret_cast<BiquadFilterInfo::ParameterVersion1*>(effect_info.GetParameter())};
if (effect_info.IsEnabled()) {
bool needs_init{false};
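        // A freshly Initialized parameter always resets the filter state. Once the
        // state-clear bug is fixed in the guest firmware, Updating/Updated never reset;
        // on older firmware only the Updating case does.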
switch (parameter.state) {
case EffectInfoBase::ParameterState::Initialized:
needs_init = true;
break;
case EffectInfoBase::ParameterState::Updating:
case EffectInfoBase::ParameterState::Updated:
if (render_context.behavior->IsBiquadFilterEffectStateClaerBugFixed()) {
needs_init = false;
} else {
needs_init = parameter.state == EffectInfoBase::ParameterState::Updating;
}
break;
default:
LOG_ERROR(Service_Audio, "Invalid biquad parameter state {}",
static_cast<u32>(parameter.state));
break;
}
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
command_buffer.GenerateBiquadFilterCommand(
node_id, effect_info, buffer_offset, channel, needs_init,
render_context.behavior->UseBiquadFilterFloatProcessing());
}
} else {
for (s8 channel = 0; channel < parameter.channel_count; channel++) {
command_buffer.GenerateCopyMixBufferCommand(node_id, effect_info, buffer_offset,
channel);
}
}
}
void CommandGenerator::GenerateLightLimiterEffectCommand(const s16 buffer_offset,
EffectInfoBase& effect_info,
const s32 node_id,
const u32 effect_index) {
const auto& state{*reinterpret_cast<LightLimiterInfo::State*>(effect_info.GetStateBuffer())};
if (render_context.behavior->IsEffectInfoVersion2Supported()) {
const auto& parameter{
*reinterpret_cast<LightLimiterInfo::ParameterVersion2*>(effect_info.GetParameter())};
const auto& result_state{*reinterpret_cast<LightLimiterInfo::StatisticsInternal*>(
&effect_context.GetDspSharedResultState(effect_index))};
command_buffer.GenerateLightLimiterCommand(node_id, buffer_offset, parameter, result_state,
state, effect_info.IsEnabled(),
effect_info.GetWorkbuffer(-1));
} else {
const auto& parameter{
*reinterpret_cast<LightLimiterInfo::ParameterVersion1*>(effect_info.GetParameter())};
command_buffer.GenerateLightLimiterCommand(node_id, buffer_offset, parameter, state,
effect_info.IsEnabled(),
effect_info.GetWorkbuffer(-1));
}
}
void CommandGenerator::GenerateCaptureCommand(const s16 buffer_offset, EffectInfoBase& effect_info,
const s32 node_id) {
if (effect_info.IsEnabled()) {
effect_info.GetWorkbuffer(0);
}
if (effect_info.GetSendBuffer()) {
const auto& parameter{
*reinterpret_cast<AuxInfo::ParameterVersion1*>(effect_info.GetParameter())};
auto channel_index{parameter.mix_buffer_count - 1};
u32 write_offset{0};
for (u32 i = 0; i < parameter.mix_buffer_count; i++, channel_index--) {
auto new_update_count{command_header.sample_count + write_offset};
const auto update_count{channel_index > 0 ? 0 : new_update_count};
command_buffer.GenerateCaptureCommand(node_id, effect_info, parameter.inputs[i],
parameter.outputs[i], buffer_offset, update_count,
parameter.count_max, write_offset);
write_offset = new_update_count;
}
}
}
void CommandGenerator::GenerateEffectCommand(MixInfo& mix_info) {
const auto effect_count{effect_context.GetCount()};
for (u32 i = 0; i < effect_count; i++) {
const auto effect_index{mix_info.effect_order_buffer[i]};
if (effect_index == -1) {
break;
}
auto& effect_info = effect_context.GetInfo(effect_index);
if (effect_info.ShouldSkip()) {
continue;
}
const auto entry_type{mix_info.mix_id == FinalMixId ? PerformanceEntryType::FinalMix
: PerformanceEntryType::SubMix};
switch (effect_info.GetType()) {
case EffectInfoBase::Type::Mix: {
DetailAspect mix_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk5);
GenerateBufferMixerCommand(mix_info.buffer_offset, effect_info, mix_info.node_id);
if (mix_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
mix_detail_aspect.node_id, PerformanceState::Stop,
mix_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::Aux: {
DetailAspect aux_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk7);
GenerateAuxCommand(mix_info.buffer_offset, effect_info, mix_info.node_id);
if (aux_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
aux_detail_aspect.node_id, PerformanceState::Stop,
aux_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::Delay: {
DetailAspect delay_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk6);
GenerateDelayCommand(mix_info.buffer_offset, effect_info, mix_info.node_id);
if (delay_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
delay_detail_aspect.node_id, PerformanceState::Stop,
delay_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::Reverb: {
DetailAspect reverb_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk8);
GenerateReverbCommand(mix_info.buffer_offset, effect_info, mix_info.node_id,
render_context.behavior->IsLongSizePreDelaySupported());
if (reverb_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
reverb_detail_aspect.node_id, PerformanceState::Stop,
reverb_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::I3dl2Reverb: {
DetailAspect i3dl2_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk9);
GenerateI3dl2ReverbEffectCommand(mix_info.buffer_offset, effect_info, mix_info.node_id);
if (i3dl2_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
i3dl2_detail_aspect.node_id, PerformanceState::Stop,
i3dl2_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::BiquadFilter: {
DetailAspect biquad_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk4);
GenerateBiquadFilterEffectCommand(mix_info.buffer_offset, effect_info,
mix_info.node_id);
if (biquad_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
biquad_detail_aspect.node_id, PerformanceState::Stop,
biquad_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::LightLimiter: {
DetailAspect light_limiter_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk11);
GenerateLightLimiterEffectCommand(mix_info.buffer_offset, effect_info, mix_info.node_id,
effect_index);
if (light_limiter_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
light_limiter_detail_aspect.node_id, PerformanceState::Stop,
light_limiter_detail_aspect.performance_entry_address);
}
} break;
case EffectInfoBase::Type::Capture: {
DetailAspect capture_detail_aspect(*this, entry_type, mix_info.node_id,
PerformanceDetailType::Unk12);
GenerateCaptureCommand(mix_info.buffer_offset, effect_info, mix_info.node_id);
if (capture_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
capture_detail_aspect.node_id, PerformanceState::Stop,
capture_detail_aspect.performance_entry_address);
}
} break;
default:
LOG_ERROR(Service_Audio, "Invalid effect type {}",
static_cast<u32>(effect_info.GetType()));
break;
}
effect_info.UpdateForCommandGeneration();
}
}
void CommandGenerator::GenerateMixCommands(MixInfo& mix_info) {
u8 precision{15};
if (render_context.behavior->IsVolumeMixParameterPrecisionQ23Supported()) {
precision = 23;
}
if (!mix_info.HasAnyConnection()) {
return;
}
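    // A mix feeding a splitter generates one mix per configured destination buffer, taking
    // its input from the source buffer selected by (dest_id % buffer_count). A mix feeding
    // another mix expands the full source x destination volume matrix, skipping zero volumes.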
if (mix_info.dst_mix_id == UnusedMixId) {
if (mix_info.dst_splitter_id != UnusedSplitterId) {
s16 dest_id{0};
auto destination{
splitter_context.GetDesintationData(mix_info.dst_splitter_id, dest_id)};
while (destination != nullptr) {
if (destination->IsConfigured()) {
auto splitter_mix_id{destination->GetMixId()};
if (splitter_mix_id < mix_context.GetCount()) {
auto splitter_mix_info{mix_context.GetInfo(splitter_mix_id)};
const s16 input_index{static_cast<s16>(mix_info.buffer_offset +
(dest_id % mix_info.buffer_count))};
for (s16 i = 0; i < splitter_mix_info->buffer_count; i++) {
auto volume{mix_info.volume * destination->GetMixVolume(i)};
if (volume != 0.0f) {
command_buffer.GenerateMixCommand(
mix_info.node_id, input_index,
splitter_mix_info->buffer_offset + i, mix_info.buffer_offset,
volume, precision);
}
}
}
}
dest_id++;
destination =
splitter_context.GetDesintationData(mix_info.dst_splitter_id, dest_id);
}
}
} else {
auto dest_mix_info{mix_context.GetInfo(mix_info.dst_mix_id)};
for (s16 i = 0; i < mix_info.buffer_count; i++) {
for (s16 j = 0; j < dest_mix_info->buffer_count; j++) {
auto volume{mix_info.volume * mix_info.mix_volumes[i][j]};
if (volume != 0.0f) {
command_buffer.GenerateMixCommand(mix_info.node_id, mix_info.buffer_offset + i,
dest_mix_info->buffer_offset + j,
mix_info.buffer_offset, volume, precision);
}
}
}
}
}
void CommandGenerator::GenerateSubMixCommand(MixInfo& mix_info) {
command_buffer.GenerateDepopForMixBuffersCommand(mix_info.node_id, mix_info,
render_context.depop_buffer);
GenerateEffectCommand(mix_info);
DetailAspect mix_detail_aspect(*this, PerformanceEntryType::SubMix, mix_info.node_id,
PerformanceDetailType::Unk5);
GenerateMixCommands(mix_info);
if (mix_detail_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(mix_detail_aspect.node_id, PerformanceState::Stop,
mix_detail_aspect.performance_entry_address);
}
}
void CommandGenerator::GenerateSubMixCommands() {
const auto submix_count{mix_context.GetCount()};
for (s32 i = 0; i < submix_count; i++) {
auto sorted_info{mix_context.GetSortedInfo(i)};
if (!sorted_info->in_use || sorted_info->mix_id == FinalMixId) {
continue;
}
EntryAspect submix_entry_aspect(*this, PerformanceEntryType::SubMix, sorted_info->node_id);
GenerateSubMixCommand(*sorted_info);
if (submix_entry_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(
submix_entry_aspect.node_id, PerformanceState::Stop,
submix_entry_aspect.performance_entry_address);
}
}
}
void CommandGenerator::GenerateFinalMixCommand() {
auto& final_mix_info{*mix_context.GetFinalMixInfo()};
command_buffer.GenerateDepopForMixBuffersCommand(final_mix_info.node_id, final_mix_info,
render_context.depop_buffer);
GenerateEffectCommand(final_mix_info);
u8 precision{15};
if (render_context.behavior->IsVolumeMixParameterPrecisionQ23Supported()) {
precision = 23;
}
for (s16 i = 0; i < final_mix_info.buffer_count; i++) {
DetailAspect volume_aspect(*this, PerformanceEntryType::FinalMix, final_mix_info.node_id,
PerformanceDetailType::Unk3);
command_buffer.GenerateVolumeCommand(final_mix_info.node_id, final_mix_info.buffer_offset,
i, final_mix_info.volume, precision);
if (volume_aspect.initialized) {
command_buffer.GeneratePerformanceCommand(volume_aspect.node_id, PerformanceState::Stop,
volume_aspect.performance_entry_address);
}
}
}
void CommandGenerator::GenerateFinalMixCommands() {
auto final_mix_info{mix_context.GetFinalMixInfo()};
EntryAspect final_mix_entry(*this, PerformanceEntryType::FinalMix, final_mix_info->node_id);
GenerateFinalMixCommand();
if (final_mix_entry.initialized) {
command_buffer.GeneratePerformanceCommand(final_mix_entry.node_id, PerformanceState::Stop,
final_mix_entry.performance_entry_address);
}
}
void CommandGenerator::GenerateSinkCommands() {
const auto sink_count{sink_context.GetCount()};
for (u32 i = 0; i < sink_count; i++) {
auto sink_info{sink_context.GetInfo(i)};
if (sink_info->IsUsed() && sink_info->GetType() == SinkInfoBase::Type::DeviceSink) {
auto state{reinterpret_cast<DeviceSinkInfo::DeviceState*>(sink_info->GetState())};
if (command_header.sample_rate != TargetSampleRate &&
state->upsampler_info == nullptr) {
auto device_state{sink_info->GetDeviceState()};
device_state->upsampler_info = render_context.upsampler_manager->Allocate();
}
EntryAspect device_sink_entry(*this, PerformanceEntryType::Sink,
sink_info->GetNodeId());
auto final_mix{mix_context.GetFinalMixInfo()};
GenerateSinkCommand(final_mix->buffer_offset, *sink_info);
if (device_sink_entry.initialized) {
command_buffer.GeneratePerformanceCommand(
device_sink_entry.node_id, PerformanceState::Stop,
device_sink_entry.performance_entry_address);
}
}
}
for (u32 i = 0; i < sink_count; i++) {
auto sink_info{sink_context.GetInfo(i)};
if (sink_info->IsUsed() && sink_info->GetType() == SinkInfoBase::Type::CircularBufferSink) {
EntryAspect circular_buffer_entry(*this, PerformanceEntryType::Sink,
sink_info->GetNodeId());
auto final_mix{mix_context.GetFinalMixInfo()};
GenerateSinkCommand(final_mix->buffer_offset, *sink_info);
if (circular_buffer_entry.initialized) {
command_buffer.GeneratePerformanceCommand(
circular_buffer_entry.node_id, PerformanceState::Stop,
circular_buffer_entry.performance_entry_address);
}
}
}
}
void CommandGenerator::GenerateSinkCommand(const s16 buffer_offset, SinkInfoBase& sink_info) {
if (sink_info.ShouldSkip()) {
return;
}
switch (sink_info.GetType()) {
case SinkInfoBase::Type::DeviceSink:
GenerateDeviceSinkCommand(buffer_offset, sink_info);
break;
case SinkInfoBase::Type::CircularBufferSink:
command_buffer.GenerateCircularBufferSinkCommand(sink_info.GetNodeId(), sink_info,
buffer_offset);
break;
default:
LOG_ERROR(Service_Audio, "Invalid sink type {}", static_cast<u32>(sink_info.GetType()));
break;
}
sink_info.UpdateForCommandGeneration();
}
void CommandGenerator::GenerateDeviceSinkCommand(const s16 buffer_offset, SinkInfoBase& sink_info) {
auto& parameter{
*reinterpret_cast<DeviceSinkInfo::DeviceInParameter*>(sink_info.GetParameter())};
auto state{*reinterpret_cast<DeviceSinkInfo::DeviceState*>(sink_info.GetState())};
if (render_context.channels == 2 && parameter.downmix_enabled) {
command_buffer.GenerateDownMix6chTo2chCommand(InvalidNodeId, parameter.inputs,
buffer_offset, parameter.downmix_coeff);
} else if (render_context.channels == 2 && parameter.input_count == 6) {
constexpr std::array<f32, 4> default_coeffs{{1.0f, 0.707f, 0.251f, 0.707f}};
command_buffer.GenerateDownMix6chTo2chCommand(InvalidNodeId, parameter.inputs,
buffer_offset, default_coeffs);
}
if (state.upsampler_info != nullptr) {
command_buffer.GenerateUpsampleCommand(
InvalidNodeId, buffer_offset, *state.upsampler_info, parameter.input_count,
parameter.inputs, command_header.buffer_count, command_header.sample_count,
command_header.sample_rate);
}
command_buffer.GenerateDeviceSinkCommand(InvalidNodeId, buffer_offset, sink_info,
render_context.session_id,
command_header.samples_buffer);
}
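// Note (illustration, not part of the original file): the default_coeffs above
// resemble a common ITU-style 5.1-to-stereo downmix, where 0.707 is roughly
// 1/sqrt(2) (-3 dB). One plausible reading, assuming the coefficients apply to
// [front, center, LFE, back] in that order, is:
//     L = FL * 1.0 + FC * 0.707 + LFE * 0.251 + BL * 0.707
//     R = FR * 1.0 + FC * 0.707 + LFE * 0.251 + BR * 0.707
// The exact application is defined by GenerateDownMix6chTo2chCommand, not here.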
void CommandGenerator::GeneratePerformanceCommand(
s32 node_id, PerformanceState state, const PerformanceEntryAddresses& entry_addresses) {
command_buffer.GeneratePerformanceCommand(node_id, state, entry_addresses);
}
} // namespace AudioCore::AudioRenderer

View file

@@ -0,0 +1,339 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <algorithm>
#include <span>
#include "audio_core/renderer/command/commands.h"
#include "audio_core/renderer/performance/performance_manager.h"
#include "common/common_types.h"
namespace AudioCore {
struct AudioRendererSystemContext;
namespace AudioRenderer {
class CommandBuffer;
struct CommandListHeader;
class VoiceContext;
class MixContext;
class EffectContext;
class SplitterContext;
class SinkContext;
class BehaviorInfo;
class VoiceInfo;
struct VoiceState;
class MixInfo;
class SinkInfoBase;
/**
* Generates all commands to build up a command list, which are sent to the AudioRenderer for
* processing.
*/
class CommandGenerator {
public:
explicit CommandGenerator(CommandBuffer& command_buffer,
const CommandListHeader& command_list_header,
const AudioRendererSystemContext& render_context,
VoiceContext& voice_context, MixContext& mix_context,
EffectContext& effect_context, SinkContext& sink_context,
SplitterContext& splitter_context,
PerformanceManager* performance_manager);
/**
* Calculate the buffer size needed for commands.
*
* @param behavior - Used to check what features are enabled.
* @param params - Input rendering parameters for numbers of voices/mixes/sinks etc.
*/
static u64 CalculateCommandBufferSize(const BehaviorInfo& behavior,
const AudioRendererParameterInternal& params) {
u64 size{0};
// Effects
size += params.effects * sizeof(EffectInfoBase);
// Voices
u64 voice_size{0};
if (behavior.IsWaveBufferVer2Supported()) {
voice_size = std::max(std::max(sizeof(AdpcmDataSourceVersion2Command),
sizeof(PcmInt16DataSourceVersion2Command)),
sizeof(PcmFloatDataSourceVersion2Command));
} else {
voice_size = std::max(std::max(sizeof(AdpcmDataSourceVersion1Command),
sizeof(PcmInt16DataSourceVersion1Command)),
sizeof(PcmFloatDataSourceVersion1Command));
}
voice_size += sizeof(BiquadFilterCommand) * MaxBiquadFilters;
voice_size += sizeof(VolumeRampCommand);
voice_size += sizeof(MixRampGroupedCommand);
size += params.voices * (params.splitter_infos * sizeof(DepopPrepareCommand) + voice_size);
// Sub mixes
size += sizeof(DepopForMixBuffersCommand) +
(sizeof(MixCommand) * MaxMixBuffers) * MaxMixBuffers;
// Final mix
size += sizeof(DepopForMixBuffersCommand) + sizeof(VolumeCommand) * MaxMixBuffers;
// Splitters
size += params.splitter_destinations * sizeof(MixRampCommand) * MaxMixBuffers;
// Sinks
size +=
params.sinks * std::max(sizeof(DeviceSinkCommand), sizeof(CircularBufferSinkCommand));
// Performance
size += (params.effects + params.voices + params.sinks + params.sub_mixes + 1 +
PerformanceManager::MaxDetailEntries) *
sizeof(PerformanceCommand);
return size;
}
/**
* Get the current command buffer used to generate commands.
*
* @return The command buffer.
*/
CommandBuffer& GetCommandBuffer() {
return command_buffer;
}
/**
* Get the current performance manager.
*
* @return The performance manager. May be nullptr.
*/
PerformanceManager* GetPerformanceManager() {
return performance_manager;
}
/**
* Generate a data source command.
* These are the basis for all audio output.
*
* @param voice_info - Generate the command from this voice.
* @param voice_state - State used by the AudioRenderer across calls.
* @param channel - Channel index to generate the command into.
*/
void GenerateDataSourceCommand(VoiceInfo& voice_info, const VoiceState& voice_state,
s8 channel);
/**
* Generate voice mixing commands.
* These are used to mix buffers together, to mix one input to many outputs,
* and also as copy commands to move data around and prevent it from being accidentally
* overwritten, e.g. by another data source command into the same channel.
*
* @param mix_volumes - Current volumes of the mix.
* @param prev_mix_volumes - Previous volumes of the mix.
* @param voice_state - State used by the AudioRenderer across calls.
* @param output_index - Output mix buffer index.
* @param buffer_count - Number of active mix buffers.
* @param input_index - Input mix buffer index.
* @param node_id - Node id of the voice this command is generated for.
*/
void GenerateVoiceMixCommand(std::span<const f32> mix_volumes,
std::span<const f32> prev_mix_volumes,
const VoiceState& voice_state, s16 output_index, s16 buffer_count,
s16 input_index, s32 node_id);
/**
* Generate a biquad filter command for a voice.
*
* @param voice_info - Voice info this command is generated from.
* @param voice_state - State used by the AudioRenderer across calls.
* @param buffer_count - Number of active mix buffers.
* @param channel - Channel index of this command.
* @param node_id - Node id of the voice this command is generated for.
*/
void GenerateBiquadFilterCommandForVoice(VoiceInfo& voice_info, const VoiceState& voice_state,
s16 buffer_count, s8 channel, s32 node_id);
/**
* Generate commands for a voice.
* Includes a data source, biquad filter, volume and mixing.
*
* @param voice_info - Voice info these commands are generated from.
*/
void GenerateVoiceCommand(VoiceInfo& voice_info);
/**
* Generate commands for all voices.
*/
void GenerateVoiceCommands();
/**
* Generate a mixing command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - BufferMixer effect info.
* @param node_id - Node id of the mix this command is generated for.
*/
void GenerateBufferMixerCommand(s16 buffer_offset, EffectInfoBase& effect_info_base,
s32 node_id);
/**
* Generate a delay effect command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - Delay effect info.
* @param node_id - Node id of the mix this command is generated for.
*/
void GenerateDelayCommand(s16 buffer_offset, EffectInfoBase& effect_info_base, s32 node_id);
/**
* Generate a reverb effect command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - Reverb effect info.
* @param node_id - Node id of the mix this command is generated for.
* @param long_size_pre_delay_supported - Use a longer pre-delay time before reverb starts.
*/
void GenerateReverbCommand(s16 buffer_offset, EffectInfoBase& effect_info_base, s32 node_id,
bool long_size_pre_delay_supported);
/**
* Generate an I3DL2 reverb effect command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - I3DL2Reverb effect info.
* @param node_id - Node id of the mix this command is generated for.
*/
void GenerateI3dl2ReverbEffectCommand(s16 buffer_offset, EffectInfoBase& effect_info,
s32 node_id);
/**
* Generate an aux effect command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - Aux effect info.
* @param node_id - Node id of the mix this command is generated for.
*/
void GenerateAuxCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id);
/**
* Generate a biquad filter effect command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - Biquad filter effect info.
* @param node_id - Node id of the mix this command is generated for.
*/
void GenerateBiquadFilterEffectCommand(s16 buffer_offset, EffectInfoBase& effect_info,
s32 node_id);
/**
* Generate a light limiter effect command.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - Limiter effect info.
* @param node_id - Node id of the mix this command is generated for.
* @param effect_index - Index for the statistics state.
*/
void GenerateLightLimiterEffectCommand(s16 buffer_offset, EffectInfoBase& effect_info,
s32 node_id, u32 effect_index);
/**
* Generate a capture effect command.
* Writes a mix buffer back to game memory.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param effect_info_base - Capture effect info.
* @param node_id - Node id of the mix this command is generated for.
*/
void GenerateCaptureCommand(s16 buffer_offset, EffectInfoBase& effect_info, s32 node_id);
/**
* Generate all effect commands for a mix.
*
* @param mix_info - Mix to generate effects from.
*/
void GenerateEffectCommand(MixInfo& mix_info);
/**
* Generate all mix commands.
*
* @param mix_info - Mix to generate the mixing commands from.
*/
void GenerateMixCommands(MixInfo& mix_info);
/**
* Generate a submix command.
* Generates all effects and all mixing commands.
*
* @param mix_info - Mix to generate the submix commands from.
*/
void GenerateSubMixCommand(MixInfo& mix_info);
/**
* Generate all submix commands.
*/
void GenerateSubMixCommands();
/**
* Generate the final mix command.
*/
void GenerateFinalMixCommand();
/**
* Generate the final mix commands.
*/
void GenerateFinalMixCommands();
/**
* Generate all sink commands.
*/
void GenerateSinkCommands();
/**
* Generate a sink command.
* Sends samples out to the backend, or a game-supplied circular buffer.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param sink_info - Sink info to generate the commands from.
*/
void GenerateSinkCommand(s16 buffer_offset, SinkInfoBase& sink_info);
/**
* Generate a device sink command.
* Sends samples out to the backend.
*
* @param buffer_offset - Base mix buffer offset to use.
* @param sink_info - Sink info to generate the commands from.
*/
void GenerateDeviceSinkCommand(s16 buffer_offset, SinkInfoBase& sink_info);
/**
* Generate a performance command.
* Used to report performance metrics of the AudioRenderer back to the game.
*
* @param node_id - Node id this command is generated for.
* @param state - Performance state to generate (start or stop).
* @param entry_addresses - Addresses of the performance entry this command updates.
*/
void GeneratePerformanceCommand(s32 node_id, PerformanceState state,
const PerformanceEntryAddresses& entry_addresses);
private:
/// Commands will be written by this buffer
CommandBuffer& command_buffer;
/// Header information for the commands generated
const CommandListHeader& command_header;
/// Various things to control generation
const AudioRendererSystemContext& render_context;
/// Used for generating voices
VoiceContext& voice_context;
/// Used for generating mixes
MixContext& mix_context;
/// Used for generating effects
EffectContext& effect_context;
/// Used for generating sinks
SinkContext& sink_context;
/// Used for generating submixes
SplitterContext& splitter_context;
/// Used for generating performance
PerformanceManager* performance_manager;
};
} // namespace AudioRenderer
} // namespace AudioCore
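// Illustrative sketch (not part of the original header): assuming a fully
// constructed CommandGenerator named `generator`, a caller building one frame's
// command list would typically run the generation passes in pipeline order
// (voices, submixes, final mix, sinks). Setup of the contexts, command buffer and
// performance manager is omitted here.
//
//     generator.GenerateVoiceCommands();
//     generator.GenerateSubMixCommands();
//     generator.GenerateFinalMixCommands();
//     generator.GenerateSinkCommands();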

View file

@@ -0,0 +1,22 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <span>
#include "audio_core/common/common.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
struct CommandListHeader {
u64 buffer_size;
u32 command_count;
std::span<s32> samples_buffer;
s16 buffer_count;
u32 sample_count;
u32 sample_rate;
};
} // namespace AudioCore::AudioRenderer

File diff suppressed because it is too large

View file

@@ -0,0 +1,208 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "audio_core/renderer/command/commands.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
/**
* Estimate the processing time required for all commands.
*/
class ICommandProcessingTimeEstimator {
public:
virtual ~ICommandProcessingTimeEstimator() = default;
virtual u32 Estimate(const PcmInt16DataSourceVersion1Command& command) const = 0;
virtual u32 Estimate(const PcmInt16DataSourceVersion2Command& command) const = 0;
virtual u32 Estimate(const PcmFloatDataSourceVersion1Command& command) const = 0;
virtual u32 Estimate(const PcmFloatDataSourceVersion2Command& command) const = 0;
virtual u32 Estimate(const AdpcmDataSourceVersion1Command& command) const = 0;
virtual u32 Estimate(const AdpcmDataSourceVersion2Command& command) const = 0;
virtual u32 Estimate(const VolumeCommand& command) const = 0;
virtual u32 Estimate(const VolumeRampCommand& command) const = 0;
virtual u32 Estimate(const BiquadFilterCommand& command) const = 0;
virtual u32 Estimate(const MixCommand& command) const = 0;
virtual u32 Estimate(const MixRampCommand& command) const = 0;
virtual u32 Estimate(const MixRampGroupedCommand& command) const = 0;
virtual u32 Estimate(const DepopPrepareCommand& command) const = 0;
virtual u32 Estimate(const DepopForMixBuffersCommand& command) const = 0;
virtual u32 Estimate(const DelayCommand& command) const = 0;
virtual u32 Estimate(const UpsampleCommand& command) const = 0;
virtual u32 Estimate(const DownMix6chTo2chCommand& command) const = 0;
virtual u32 Estimate(const AuxCommand& command) const = 0;
virtual u32 Estimate(const DeviceSinkCommand& command) const = 0;
virtual u32 Estimate(const CircularBufferSinkCommand& command) const = 0;
virtual u32 Estimate(const ReverbCommand& command) const = 0;
virtual u32 Estimate(const I3dl2ReverbCommand& command) const = 0;
virtual u32 Estimate(const PerformanceCommand& command) const = 0;
virtual u32 Estimate(const ClearMixBufferCommand& command) const = 0;
virtual u32 Estimate(const CopyMixBufferCommand& command) const = 0;
virtual u32 Estimate(const LightLimiterVersion1Command& command) const = 0;
virtual u32 Estimate(const LightLimiterVersion2Command& command) const = 0;
virtual u32 Estimate(const MultiTapBiquadFilterCommand& command) const = 0;
virtual u32 Estimate(const CaptureCommand& command) const = 0;
};
class CommandProcessingTimeEstimatorVersion1 final : public ICommandProcessingTimeEstimator {
public:
CommandProcessingTimeEstimatorVersion1(u32 sample_count_, u32 buffer_count_)
: sample_count{sample_count_}, buffer_count{buffer_count_} {}
u32 Estimate(const PcmInt16DataSourceVersion1Command& command) const override;
u32 Estimate(const PcmInt16DataSourceVersion2Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion1Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion2Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion1Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion2Command& command) const override;
u32 Estimate(const VolumeCommand& command) const override;
u32 Estimate(const VolumeRampCommand& command) const override;
u32 Estimate(const BiquadFilterCommand& command) const override;
u32 Estimate(const MixCommand& command) const override;
u32 Estimate(const MixRampCommand& command) const override;
u32 Estimate(const MixRampGroupedCommand& command) const override;
u32 Estimate(const DepopPrepareCommand& command) const override;
u32 Estimate(const DepopForMixBuffersCommand& command) const override;
u32 Estimate(const DelayCommand& command) const override;
u32 Estimate(const UpsampleCommand& command) const override;
u32 Estimate(const DownMix6chTo2chCommand& command) const override;
u32 Estimate(const AuxCommand& command) const override;
u32 Estimate(const DeviceSinkCommand& command) const override;
u32 Estimate(const CircularBufferSinkCommand& command) const override;
u32 Estimate(const ReverbCommand& command) const override;
u32 Estimate(const I3dl2ReverbCommand& command) const override;
u32 Estimate(const PerformanceCommand& command) const override;
u32 Estimate(const ClearMixBufferCommand& command) const override;
u32 Estimate(const CopyMixBufferCommand& command) const override;
u32 Estimate(const LightLimiterVersion1Command& command) const override;
u32 Estimate(const LightLimiterVersion2Command& command) const override;
u32 Estimate(const MultiTapBiquadFilterCommand& command) const override;
u32 Estimate(const CaptureCommand& command) const override;
private:
u32 sample_count{};
u32 buffer_count{};
};
class CommandProcessingTimeEstimatorVersion2 final : public ICommandProcessingTimeEstimator {
public:
CommandProcessingTimeEstimatorVersion2(u32 sample_count_, u32 buffer_count_)
: sample_count{sample_count_}, buffer_count{buffer_count_} {}
u32 Estimate(const PcmInt16DataSourceVersion1Command& command) const override;
u32 Estimate(const PcmInt16DataSourceVersion2Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion1Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion2Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion1Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion2Command& command) const override;
u32 Estimate(const VolumeCommand& command) const override;
u32 Estimate(const VolumeRampCommand& command) const override;
u32 Estimate(const BiquadFilterCommand& command) const override;
u32 Estimate(const MixCommand& command) const override;
u32 Estimate(const MixRampCommand& command) const override;
u32 Estimate(const MixRampGroupedCommand& command) const override;
u32 Estimate(const DepopPrepareCommand& command) const override;
u32 Estimate(const DepopForMixBuffersCommand& command) const override;
u32 Estimate(const DelayCommand& command) const override;
u32 Estimate(const UpsampleCommand& command) const override;
u32 Estimate(const DownMix6chTo2chCommand& command) const override;
u32 Estimate(const AuxCommand& command) const override;
u32 Estimate(const DeviceSinkCommand& command) const override;
u32 Estimate(const CircularBufferSinkCommand& command) const override;
u32 Estimate(const ReverbCommand& command) const override;
u32 Estimate(const I3dl2ReverbCommand& command) const override;
u32 Estimate(const PerformanceCommand& command) const override;
u32 Estimate(const ClearMixBufferCommand& command) const override;
u32 Estimate(const CopyMixBufferCommand& command) const override;
u32 Estimate(const LightLimiterVersion1Command& command) const override;
u32 Estimate(const LightLimiterVersion2Command& command) const override;
u32 Estimate(const MultiTapBiquadFilterCommand& command) const override;
u32 Estimate(const CaptureCommand& command) const override;
private:
u32 sample_count{};
u32 buffer_count{};
};
class CommandProcessingTimeEstimatorVersion3 final : public ICommandProcessingTimeEstimator {
public:
CommandProcessingTimeEstimatorVersion3(u32 sample_count_, u32 buffer_count_)
: sample_count{sample_count_}, buffer_count{buffer_count_} {}
u32 Estimate(const PcmInt16DataSourceVersion1Command& command) const override;
u32 Estimate(const PcmInt16DataSourceVersion2Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion1Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion2Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion1Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion2Command& command) const override;
u32 Estimate(const VolumeCommand& command) const override;
u32 Estimate(const VolumeRampCommand& command) const override;
u32 Estimate(const BiquadFilterCommand& command) const override;
u32 Estimate(const MixCommand& command) const override;
u32 Estimate(const MixRampCommand& command) const override;
u32 Estimate(const MixRampGroupedCommand& command) const override;
u32 Estimate(const DepopPrepareCommand& command) const override;
u32 Estimate(const DepopForMixBuffersCommand& command) const override;
u32 Estimate(const DelayCommand& command) const override;
u32 Estimate(const UpsampleCommand& command) const override;
u32 Estimate(const DownMix6chTo2chCommand& command) const override;
u32 Estimate(const AuxCommand& command) const override;
u32 Estimate(const DeviceSinkCommand& command) const override;
u32 Estimate(const CircularBufferSinkCommand& command) const override;
u32 Estimate(const ReverbCommand& command) const override;
u32 Estimate(const I3dl2ReverbCommand& command) const override;
u32 Estimate(const PerformanceCommand& command) const override;
u32 Estimate(const ClearMixBufferCommand& command) const override;
u32 Estimate(const CopyMixBufferCommand& command) const override;
u32 Estimate(const LightLimiterVersion1Command& command) const override;
u32 Estimate(const LightLimiterVersion2Command& command) const override;
u32 Estimate(const MultiTapBiquadFilterCommand& command) const override;
u32 Estimate(const CaptureCommand& command) const override;
private:
u32 sample_count{};
u32 buffer_count{};
};
class CommandProcessingTimeEstimatorVersion4 final : public ICommandProcessingTimeEstimator {
public:
CommandProcessingTimeEstimatorVersion4(u32 sample_count_, u32 buffer_count_)
: sample_count{sample_count_}, buffer_count{buffer_count_} {}
u32 Estimate(const PcmInt16DataSourceVersion1Command& command) const override;
u32 Estimate(const PcmInt16DataSourceVersion2Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion1Command& command) const override;
u32 Estimate(const PcmFloatDataSourceVersion2Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion1Command& command) const override;
u32 Estimate(const AdpcmDataSourceVersion2Command& command) const override;
u32 Estimate(const VolumeCommand& command) const override;
u32 Estimate(const VolumeRampCommand& command) const override;
u32 Estimate(const BiquadFilterCommand& command) const override;
u32 Estimate(const MixCommand& command) const override;
u32 Estimate(const MixRampCommand& command) const override;
u32 Estimate(const MixRampGroupedCommand& command) const override;
u32 Estimate(const DepopPrepareCommand& command) const override;
u32 Estimate(const DepopForMixBuffersCommand& command) const override;
u32 Estimate(const DelayCommand& command) const override;
u32 Estimate(const UpsampleCommand& command) const override;
u32 Estimate(const DownMix6chTo2chCommand& command) const override;
u32 Estimate(const AuxCommand& command) const override;
u32 Estimate(const DeviceSinkCommand& command) const override;
u32 Estimate(const CircularBufferSinkCommand& command) const override;
u32 Estimate(const ReverbCommand& command) const override;
u32 Estimate(const I3dl2ReverbCommand& command) const override;
u32 Estimate(const PerformanceCommand& command) const override;
u32 Estimate(const ClearMixBufferCommand& command) const override;
u32 Estimate(const CopyMixBufferCommand& command) const override;
u32 Estimate(const LightLimiterVersion1Command& command) const override;
u32 Estimate(const LightLimiterVersion2Command& command) const override;
u32 Estimate(const MultiTapBiquadFilterCommand& command) const override;
u32 Estimate(const CaptureCommand& command) const override;
private:
u32 sample_count{};
u32 buffer_count{};
};
} // namespace AudioCore::AudioRenderer
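// Illustrative sketch (not part of the original header): a caller is expected to
// pick one estimator version and total per-command costs through the common
// interface. The chosen version and the command variable names below are
// assumptions for illustration only.
//
//     CommandProcessingTimeEstimatorVersion3 estimator{sample_count, buffer_count};
//     u32 estimated_time{0};
//     estimated_time += estimator.Estimate(volume_command);
//     estimated_time += estimator.Estimate(mix_command);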

View file

@@ -0,0 +1,31 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "audio_core/renderer/command/data_source/adpcm.h"
#include "audio_core/renderer/command/data_source/pcm_float.h"
#include "audio_core/renderer/command/data_source/pcm_int16.h"
#include "audio_core/renderer/command/effect/aux_.h"
#include "audio_core/renderer/command/effect/biquad_filter.h"
#include "audio_core/renderer/command/effect/capture.h"
#include "audio_core/renderer/command/effect/delay.h"
#include "audio_core/renderer/command/effect/i3dl2_reverb.h"
#include "audio_core/renderer/command/effect/light_limiter.h"
#include "audio_core/renderer/command/effect/multi_tap_biquad_filter.h"
#include "audio_core/renderer/command/effect/reverb.h"
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/command/mix/clear_mix.h"
#include "audio_core/renderer/command/mix/copy_mix.h"
#include "audio_core/renderer/command/mix/depop_for_mix_buffers.h"
#include "audio_core/renderer/command/mix/depop_prepare.h"
#include "audio_core/renderer/command/mix/mix.h"
#include "audio_core/renderer/command/mix/mix_ramp.h"
#include "audio_core/renderer/command/mix/mix_ramp_grouped.h"
#include "audio_core/renderer/command/mix/volume.h"
#include "audio_core/renderer/command/mix/volume_ramp.h"
#include "audio_core/renderer/command/performance/performance.h"
#include "audio_core/renderer/command/resample/downmix_6ch_to_2ch.h"
#include "audio_core/renderer/command/resample/upsample.h"
#include "audio_core/renderer/command/sink/circular_buffer.h"
#include "audio_core/renderer/command/sink/device.h"

View file

@@ -0,0 +1,84 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <span>
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/data_source/adpcm.h"
#include "audio_core/renderer/command/data_source/decode.h"
namespace AudioCore::AudioRenderer {
void AdpcmDataSourceVersion1Command::Dump(const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("AdpcmDataSourceVersion1Command\n\toutput_index {:02X} source sample "
"rate {} target sample rate {} src quality {}\n",
output_index, sample_rate, processor.target_sample_rate, src_quality);
}
void AdpcmDataSourceVersion1Command::Process(const ADSP::CommandListProcessor& processor) {
auto out_buffer{processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count)};
DecodeFromWaveBuffersArgs args{
.sample_format{SampleFormat::Adpcm},
.output{out_buffer},
.voice_state{reinterpret_cast<VoiceState*>(voice_state)},
.wave_buffers{wave_buffers},
.channel{0},
.channel_count{1},
.src_quality{src_quality},
.pitch{pitch},
.source_sample_rate{sample_rate},
.target_sample_rate{processor.target_sample_rate},
.sample_count{processor.sample_count},
.data_address{data_address},
.data_size{data_size},
.IsVoicePlayedSampleCountResetAtLoopPointSupported{(flags & 1) != 0},
.IsVoicePitchAndSrcSkippedSupported{(flags & 2) != 0},
};
DecodeFromWaveBuffers(*processor.memory, args);
}
bool AdpcmDataSourceVersion1Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
void AdpcmDataSourceVersion2Command::Dump(const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("AdpcmDataSourceVersion2Command\n\toutput_index {:02X} source sample "
"rate {} target sample rate {} src quality {}\n",
output_index, sample_rate, processor.target_sample_rate, src_quality);
}
void AdpcmDataSourceVersion2Command::Process(const ADSP::CommandListProcessor& processor) {
auto out_buffer{processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count)};
DecodeFromWaveBuffersArgs args{
.sample_format{SampleFormat::Adpcm},
.output{out_buffer},
.voice_state{reinterpret_cast<VoiceState*>(voice_state)},
.wave_buffers{wave_buffers},
.channel{0},
.channel_count{1},
.src_quality{src_quality},
.pitch{pitch},
.source_sample_rate{sample_rate},
.target_sample_rate{processor.target_sample_rate},
.sample_count{processor.sample_count},
.data_address{data_address},
.data_size{data_size},
.IsVoicePlayedSampleCountResetAtLoopPointSupported{(flags & 1) != 0},
.IsVoicePitchAndSrcSkippedSupported{(flags & 2) != 0},
};
DecodeFromWaveBuffers(*processor.memory, args);
}
bool AdpcmDataSourceVersion2Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@@ -0,0 +1,119 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/common/common.h"
#include "audio_core/common/wave_buffer.h"
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command to decode ADPCM-encoded version 1 wavebuffers
* into the output_index mix buffer.
*/
struct AdpcmDataSourceVersion1Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Quality used for sample rate conversion
SrcQuality src_quality;
/// Mix buffer index for decoded samples
s16 output_index;
/// Flags to control decoding (see AudioCore::AudioRenderer::VoiceInfo::Flags)
u16 flags;
/// Wavebuffer sample rate
u32 sample_rate;
/// Pitch used for sample rate conversion
f32 pitch;
/// Wavebuffers containing the wavebuffer address, context address, looping information etc
std::array<WaveBufferVersion2, MaxWaveBuffers> wave_buffers;
/// Voice state, updated each call and written back to game
CpuAddr voice_state;
/// Coefficients data address
CpuAddr data_address;
/// Coefficients data size
u64 data_size;
};
/**
* AudioRenderer command to decode ADPCM-encoded version 2 wavebuffers
* into the output_index mix buffer.
*/
struct AdpcmDataSourceVersion2Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Quality used for sample rate conversion
SrcQuality src_quality;
/// Mix buffer index for decoded samples
s16 output_index;
/// Flags to control decoding (see AudioCore::AudioRenderer::VoiceInfo::Flags)
u16 flags;
/// Wavebuffer sample rate
u32 sample_rate;
/// Pitch used for sample rate conversion
f32 pitch;
/// Target channel to read within the wavebuffer
s8 channel_index;
/// Number of channels within the wavebuffer
s8 channel_count;
/// Wavebuffers containing the wavebuffer address, context address, looping information etc
std::array<WaveBufferVersion2, MaxWaveBuffers> wave_buffers;
/// Voice state, updated each call and written back to game
CpuAddr voice_state;
/// Coefficients data address
CpuAddr data_address;
/// Coefficients data size
u64 data_size;
};
} // namespace AudioCore::AudioRenderer

View file

@@ -0,0 +1,428 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <array>
#include <cstring>
#include <limits>
#include <vector>
#include "audio_core/renderer/command/data_source/decode.h"
#include "audio_core/renderer/command/resample/resample.h"
#include "common/fixed_point.h"
#include "common/logging/log.h"
#include "core/memory.h"
namespace AudioCore::AudioRenderer {
constexpr u32 TempBufferSize = 0x3F00;
constexpr std::array<u8, 3> PitchBySrcQuality = {4, 8, 4};
/**
* Decode PCM data. Only s16 or f32 is supported.
*
* @tparam T - Type to decode. Only s16 and f32 are supported.
* @param memory - Core memory for reading samples.
* @param out_buffer - Output mix buffer to receive the samples.
* @param req - Information for how to decode.
* @return Number of samples decoded.
*/
template <typename T>
static u32 DecodePcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
const DecodeArg& req) {
constexpr s32 min{std::numeric_limits<s16>::min()};
constexpr s32 max{std::numeric_limits<s16>::max()};
if (req.buffer == 0 || req.buffer_size == 0) {
return 0;
}
if (req.start_offset >= req.end_offset) {
return 0;
}
auto samples_to_decode{
std::min(req.samples_to_read, req.end_offset - req.start_offset - req.offset)};
u32 channel_count{static_cast<u32>(req.channel_count)};
switch (req.channel_count) {
default: {
const VAddr source{req.buffer +
(((req.start_offset + req.offset) * channel_count) * sizeof(T))};
const u64 size{channel_count * samples_to_decode};
const u64 size_bytes{size * sizeof(T)};
std::vector<T> samples(size);
memory.ReadBlockUnsafe(source, samples.data(), size_bytes);
if constexpr (std::is_floating_point_v<T>) {
for (u32 i = 0; i < samples_to_decode; i++) {
auto sample{static_cast<s32>(samples[i * channel_count + req.target_channel] *
std::numeric_limits<s16>::max())};
out_buffer[i] = static_cast<s16>(std::clamp(sample, min, max));
}
} else {
for (u32 i = 0; i < samples_to_decode; i++) {
out_buffer[i] = samples[i * channel_count + req.target_channel];
}
}
} break;
case 1:
if (req.target_channel != 0) {
LOG_ERROR(Service_Audio, "Invalid target channel, expected 0, got {}",
req.target_channel);
return 0;
}
const VAddr source{req.buffer + ((req.start_offset + req.offset) * sizeof(T))};
std::vector<T> samples(samples_to_decode);
memory.ReadBlockUnsafe(source, samples.data(), samples_to_decode * sizeof(T));
if constexpr (std::is_floating_point_v<T>) {
for (u32 i = 0; i < samples_to_decode; i++) {
auto sample{static_cast<s32>(samples[i * channel_count + req.target_channel] *
std::numeric_limits<s16>::max())};
out_buffer[i] = static_cast<s16>(std::clamp(sample, min, max));
}
} else {
std::memcpy(out_buffer.data(), samples.data(), samples_to_decode * sizeof(s16));
}
break;
}
return samples_to_decode;
}
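// Worked example (illustration, not part of the original file): for PcmFloat input,
// a source sample of 0.5f decodes above as
//     static_cast<s32>(0.5f * 32767) = 16383,
// which already lies inside [-32768, 32767], so the clamp leaves it unchanged and
// 16383 is written to the s16 output buffer.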
/**
* Decode ADPCM data.
*
* @param memory - Core memory for reading samples.
* @param out_buffer - Output mix buffer to receive the samples.
* @param req - Information for how to decode.
* @return Number of samples decoded.
*/
static u32 DecodeAdpcm(Core::Memory::Memory& memory, std::span<s16> out_buffer,
const DecodeArg& req) {
constexpr u32 SamplesPerFrame{14};
constexpr u32 NibblesPerFrame{16};
if (req.buffer == 0 || req.buffer_size == 0) {
LOG_ERROR(Service_Audio, "Buffer is 0!");
return 0;
}
if (req.start_offset >= req.end_offset) {
LOG_ERROR(Service_Audio, "Start offset greater than end offset!");
return 0;
}
auto end{(req.end_offset % SamplesPerFrame) +
NibblesPerFrame * (req.end_offset / SamplesPerFrame)};
if (req.end_offset % SamplesPerFrame) {
end += 3;
} else {
end += 1;
}
if (end / 2 > req.buffer_size) {
LOG_ERROR(Service_Audio, "End greater than buffer size!");
return 0;
}
auto samples_to_process{
std::min(req.end_offset - req.start_offset - req.offset, req.samples_to_read)};
auto samples_to_read{samples_to_process};
auto start_pos{req.start_offset + req.offset};
auto samples_remaining_in_frame{start_pos % SamplesPerFrame};
auto position_in_frame{(start_pos / SamplesPerFrame) * NibblesPerFrame +
samples_remaining_in_frame};
if (samples_remaining_in_frame) {
position_in_frame += 2;
}
const auto size{std::max((samples_to_process / 8U) * SamplesPerFrame, 8U)};
std::vector<u8> wavebuffer(size, 0);
memory.ReadBlockUnsafe(req.buffer + position_in_frame / 2, wavebuffer.data(),
wavebuffer.size());
auto context{req.adpcm_context};
auto header{context->header};
u8 coeff_index{static_cast<u8>((header >> 4U) & 0xFU)};
u8 scale{static_cast<u8>(header & 0xFU)};
s32 coeff0{req.coefficients[coeff_index * 2 + 0]};
s32 coeff1{req.coefficients[coeff_index * 2 + 1]};
auto yn0{context->yn0};
auto yn1{context->yn1};
static constexpr std::array<s32, 16> Steps{
0, 1, 2, 3, 4, 5, 6, 7, -8, -7, -6, -5, -4, -3, -2, -1,
};
const auto decode_sample = [&](const s32 code) -> s16 {
const auto xn = code * (1 << scale);
const auto prediction = coeff0 * yn0 + coeff1 * yn1;
const auto sample = ((xn << 11) + 0x400 + prediction) >> 11;
const auto saturated = std::clamp<s32>(sample, -0x8000, 0x7FFF);
yn1 = yn0;
yn0 = static_cast<s16>(saturated);
return yn0;
};
u32 read_index{0};
u32 write_index{0};
while (samples_to_read > 0) {
// Are we at a new frame?
if ((position_in_frame % NibblesPerFrame) == 0) {
header = wavebuffer[read_index++];
coeff_index = (header >> 4) & 0xF;
scale = header & 0xF;
coeff0 = req.coefficients[coeff_index * 2 + 0];
coeff1 = req.coefficients[coeff_index * 2 + 1];
position_in_frame += 2;
// Can we consume all of this frame's samples?
if (samples_to_read >= SamplesPerFrame) {
// Can grab all samples until the next header
for (u32 i = 0; i < SamplesPerFrame / 2; i++) {
auto code0{Steps[(wavebuffer[read_index] >> 4) & 0xF]};
auto code1{Steps[wavebuffer[read_index] & 0xF]};
read_index++;
out_buffer[write_index++] = decode_sample(code0);
out_buffer[write_index++] = decode_sample(code1);
}
position_in_frame += SamplesPerFrame;
samples_to_read -= SamplesPerFrame;
continue;
}
}
// Decode a single sample
auto code{wavebuffer[read_index]};
if (position_in_frame & 1) {
code &= 0xF;
read_index++;
} else {
code >>= 4;
}
out_buffer[write_index++] = decode_sample(Steps[code]);
position_in_frame++;
samples_to_read--;
}
context->header = header;
context->yn0 = yn0;
context->yn1 = yn1;
return samples_to_process;
}
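// Worked example (illustration, not part of the original file): one pass through
// decode_sample() with assumed values scale = 2, coeff0 = 2048, coeff1 = 0,
// yn0 = 100, yn1 = 0 and code = 3 (Steps[3]):
//     xn         = 3 * (1 << 2)                       = 12
//     prediction = 2048 * 100 + 0 * 0                 = 204800
//     sample     = ((12 << 11) + 0x400 + 204800) >> 11
//                = 230400 >> 11                       = 112
// 112 is within [-0x8000, 0x7FFF], so it is written out unclamped and becomes the
// new yn0 for the next sample.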
/**
* Decode implementation.
* Decode wavebuffers according to the given args.
*
* @param memory - Core memory to read data from.
* @param args - The wavebuffer data, and information for how to decode it.
*/
void DecodeFromWaveBuffers(Core::Memory::Memory& memory, const DecodeFromWaveBuffersArgs& args) {
auto& voice_state{*args.voice_state};
auto remaining_sample_count{args.sample_count};
auto fraction{voice_state.fraction};
const auto sample_rate_ratio{
(Common::FixedPoint<49, 15>(args.source_sample_rate) / args.target_sample_rate) *
args.pitch};
const auto size_required{fraction + remaining_sample_count * sample_rate_ratio};
if (size_required.to_int_floor() < 0) {
return;
}
auto pitch{PitchBySrcQuality[static_cast<u32>(args.src_quality)]};
if (static_cast<u32>(pitch + size_required.to_int_floor()) > TempBufferSize) {
return;
}
auto max_remaining_sample_count{
((Common::FixedPoint<17, 15>(TempBufferSize) - fraction) / sample_rate_ratio)
.to_uint_floor()};
max_remaining_sample_count = std::min(max_remaining_sample_count, remaining_sample_count);
auto wavebuffers_consumed{voice_state.wave_buffers_consumed};
auto wavebuffer_index{voice_state.wave_buffer_index};
auto played_sample_count{voice_state.played_sample_count};
bool is_buffer_starved{false};
u32 offset{voice_state.offset};
auto output_buffer{args.output};
std::vector<s16> temp_buffer(TempBufferSize, 0);
while (remaining_sample_count > 0) {
const auto samples_to_write{std::min(remaining_sample_count, max_remaining_sample_count)};
const auto samples_to_read{
(fraction + samples_to_write * sample_rate_ratio).to_uint_floor()};
u32 temp_buffer_pos{0};
if (!args.IsVoicePitchAndSrcSkippedSupported) {
for (u32 i = 0; i < pitch; i++) {
temp_buffer[i] = voice_state.sample_history[i];
}
temp_buffer_pos = pitch;
}
u32 samples_read{0};
while (samples_read < samples_to_read) {
if (wavebuffer_index >= MaxWaveBuffers) {
LOG_ERROR(Service_Audio, "Invalid wavebuffer index! {}", wavebuffer_index);
wavebuffer_index = 0;
voice_state.wave_buffer_valid.fill(false);
wavebuffers_consumed = MaxWaveBuffers;
}
if (!voice_state.wave_buffer_valid[wavebuffer_index]) {
is_buffer_starved = true;
break;
}
auto& wavebuffer{args.wave_buffers[wavebuffer_index]};
if (offset == 0 && args.sample_format == SampleFormat::Adpcm &&
wavebuffer.context != 0) {
memory.ReadBlockUnsafe(wavebuffer.context, &voice_state.adpcm_context,
wavebuffer.context_size);
}
auto start_offset{wavebuffer.start_offset};
auto end_offset{wavebuffer.end_offset};
if (wavebuffer.loop && voice_state.loop_count > 0 &&
wavebuffer.loop_start_offset != 0 && wavebuffer.loop_end_offset != 0 &&
wavebuffer.loop_start_offset <= wavebuffer.loop_end_offset) {
start_offset = wavebuffer.loop_start_offset;
end_offset = wavebuffer.loop_end_offset;
}
DecodeArg decode_arg{.buffer{wavebuffer.buffer},
.buffer_size{wavebuffer.buffer_size},
.start_offset{start_offset},
.end_offset{end_offset},
.channel_count{args.channel_count},
.coefficients{},
.adpcm_context{nullptr},
.target_channel{args.channel},
.offset{offset},
.samples_to_read{samples_to_read - samples_read}};
s32 samples_decoded{0};
switch (args.sample_format) {
case SampleFormat::PcmInt16:
samples_decoded = DecodePcm<s16>(
memory, {&temp_buffer[temp_buffer_pos], args.sample_count}, decode_arg);
break;
case SampleFormat::PcmFloat:
samples_decoded = DecodePcm<f32>(
memory, {&temp_buffer[temp_buffer_pos], args.sample_count}, decode_arg);
break;
case SampleFormat::Adpcm: {
decode_arg.adpcm_context = &voice_state.adpcm_context;
memory.ReadBlockUnsafe(args.data_address, &decode_arg.coefficients, args.data_size);
samples_decoded = DecodeAdpcm(
memory, {&temp_buffer[temp_buffer_pos], args.sample_count}, decode_arg);
} break;
default:
LOG_ERROR(Service_Audio, "Invalid sample format to decode {}",
static_cast<u32>(args.sample_format));
samples_decoded = 0;
break;
}
played_sample_count += samples_decoded;
samples_read += samples_decoded;
temp_buffer_pos += samples_decoded;
offset += samples_decoded;
if (samples_decoded == 0 || offset >= end_offset - start_offset) {
offset = 0;
if (!wavebuffer.loop) {
voice_state.wave_buffer_valid[wavebuffer_index] = false;
voice_state.loop_count = 0;
if (wavebuffer.stream_ended) {
played_sample_count = 0;
}
wavebuffer_index = (wavebuffer_index + 1) % MaxWaveBuffers;
wavebuffers_consumed++;
} else {
voice_state.loop_count++;
if (wavebuffer.loop_count > 0 &&
(voice_state.loop_count > wavebuffer.loop_count || samples_decoded == 0)) {
voice_state.wave_buffer_valid[wavebuffer_index] = false;
voice_state.loop_count = 0;
if (wavebuffer.stream_ended) {
played_sample_count = 0;
}
wavebuffer_index = (wavebuffer_index + 1) % MaxWaveBuffers;
wavebuffers_consumed++;
}
if (samples_decoded == 0) {
is_buffer_starved = true;
break;
}
if (args.IsVoicePlayedSampleCountResetAtLoopPointSupported) {
played_sample_count = 0;
}
}
}
}
if (args.IsVoicePitchAndSrcSkippedSupported) {
if (samples_read > output_buffer.size()) {
LOG_ERROR(Service_Audio, "Attempting to write past the end of output buffer!");
}
for (u32 i = 0; i < samples_read; i++) {
output_buffer[i] = temp_buffer[i];
}
} else {
std::memset(&temp_buffer[temp_buffer_pos], 0,
(samples_to_read - samples_read) * sizeof(s16));
Resample(output_buffer, temp_buffer, sample_rate_ratio, fraction, samples_to_write,
args.src_quality);
std::memcpy(voice_state.sample_history.data(), &temp_buffer[samples_to_read],
pitch * sizeof(s16));
}
remaining_sample_count -= samples_to_write;
if (remaining_sample_count != 0 && is_buffer_starved) {
LOG_ERROR(Service_Audio, "Samples remaining but buffer is starving??");
break;
}
output_buffer = output_buffer.subspan(samples_to_write);
}
voice_state.wave_buffers_consumed = wavebuffers_consumed;
voice_state.played_sample_count = played_sample_count;
voice_state.wave_buffer_index = wavebuffer_index;
voice_state.offset = offset;
voice_state.fraction = fraction;
}
} // namespace AudioCore::AudioRenderer
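// Note (illustration, not part of the original file): the resampling ratio used by
// DecodeFromWaveBuffers is (source_sample_rate / target_sample_rate) * pitch. For
// example, with an assumed 48000 Hz wavebuffer, a 32000 Hz target and pitch 1.0,
// the ratio is 1.5, so roughly 1.5 source samples are consumed per output sample,
// with the fractional remainder carried in voice_state.fraction between calls.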

View file

@@ -0,0 +1,59 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <span>
#include "audio_core/common/common.h"
#include "audio_core/common/wave_buffer.h"
#include "audio_core/renderer/voice/voice_state.h"
#include "common/common_types.h"
namespace Core::Memory {
class Memory;
}
namespace AudioCore::AudioRenderer {
struct DecodeFromWaveBuffersArgs {
SampleFormat sample_format;
std::span<s32> output;
VoiceState* voice_state;
std::span<WaveBufferVersion2> wave_buffers;
s8 channel;
s8 channel_count;
SrcQuality src_quality;
f32 pitch;
u32 source_sample_rate;
u32 target_sample_rate;
u32 sample_count;
CpuAddr data_address;
u64 data_size;
bool IsVoicePlayedSampleCountResetAtLoopPointSupported;
bool IsVoicePitchAndSrcSkippedSupported;
};
struct DecodeArg {
CpuAddr buffer;
u64 buffer_size;
u32 start_offset;
u32 end_offset;
s8 channel_count;
std::array<s16, 16> coefficients;
VoiceState::AdpcmContext* adpcm_context;
s8 target_channel;
u32 offset;
u32 samples_to_read;
};
/**
* Decode wavebuffers according to the given args.
*
* @param memory - Core memory to read data from.
* @param args - The wavebuffer data, and information for how to decode it.
*/
void DecodeFromWaveBuffers(Core::Memory::Memory& memory, const DecodeFromWaveBuffersArgs& args);
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,86 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/data_source/decode.h"
#include "audio_core/renderer/command/data_source/pcm_float.h"
namespace AudioCore::AudioRenderer {
void PcmFloatDataSourceVersion1Command::Dump(const ADSP::CommandListProcessor& processor,
std::string& string) {
string +=
fmt::format("PcmFloatDataSourceVersion1Command\n\toutput_index {:02X} channel {} "
"channel count {} source sample rate {} target sample rate {} src quality {}\n",
output_index, channel_index, channel_count, sample_rate,
processor.target_sample_rate, src_quality);
}
void PcmFloatDataSourceVersion1Command::Process(const ADSP::CommandListProcessor& processor) {
auto out_buffer = processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count);
DecodeFromWaveBuffersArgs args{
.sample_format{SampleFormat::PcmFloat},
.output{out_buffer},
.voice_state{reinterpret_cast<VoiceState*>(voice_state)},
.wave_buffers{wave_buffers},
.channel{channel_index},
.channel_count{channel_count},
.src_quality{src_quality},
.pitch{pitch},
.source_sample_rate{sample_rate},
.target_sample_rate{processor.target_sample_rate},
.sample_count{processor.sample_count},
.data_address{0},
.data_size{0},
.IsVoicePlayedSampleCountResetAtLoopPointSupported{(flags & 1) != 0},
.IsVoicePitchAndSrcSkippedSupported{(flags & 2) != 0},
};
DecodeFromWaveBuffers(*processor.memory, args);
}
bool PcmFloatDataSourceVersion1Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
void PcmFloatDataSourceVersion2Command::Dump(const ADSP::CommandListProcessor& processor,
std::string& string) {
string +=
fmt::format("PcmFloatDataSourceVersion2Command\n\toutput_index {:02X} channel {} "
"channel count {} source sample rate {} target sample rate {} src quality {}\n",
output_index, channel_index, channel_count, sample_rate,
processor.target_sample_rate, src_quality);
}
void PcmFloatDataSourceVersion2Command::Process(const ADSP::CommandListProcessor& processor) {
auto out_buffer = processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count);
DecodeFromWaveBuffersArgs args{
.sample_format{SampleFormat::PcmFloat},
.output{out_buffer},
.voice_state{reinterpret_cast<VoiceState*>(voice_state)},
.wave_buffers{wave_buffers},
.channel{channel_index},
.channel_count{channel_count},
.src_quality{src_quality},
.pitch{pitch},
.source_sample_rate{sample_rate},
.target_sample_rate{processor.target_sample_rate},
.sample_count{processor.sample_count},
.data_address{0},
.data_size{0},
.IsVoicePlayedSampleCountResetAtLoopPointSupported{(flags & 1) != 0},
.IsVoicePitchAndSrcSkippedSupported{(flags & 2) != 0},
};
DecodeFromWaveBuffers(*processor.memory, args);
}
bool PcmFloatDataSourceVersion2Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@@ -0,0 +1,113 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/common/wave_buffer.h"
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command to decode PCM float-encoded version 1 wavebuffers
* into the output_index mix buffer.
*/
struct PcmFloatDataSourceVersion1Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Quality used for sample rate conversion
SrcQuality src_quality;
/// Mix buffer index for decoded samples
s16 output_index;
/// Flags to control decoding (see AudioCore::AudioRenderer::VoiceInfo::Flags)
u16 flags;
/// Wavebuffer sample rate
u32 sample_rate;
/// Pitch used for sample rate conversion
f32 pitch;
/// Target channel to read within the wavebuffer
s8 channel_index;
/// Number of channels within the wavebuffer
s8 channel_count;
/// Wavebuffers containing the wavebuffer address, context address, looping information etc
std::array<WaveBufferVersion2, MaxWaveBuffers> wave_buffers;
/// Voice state, updated each call and written back to game
CpuAddr voice_state;
};
/**
* AudioRenderer command to decode PCM float-encoded version 2 wavebuffers
* into the output_index mix buffer.
*/
struct PcmFloatDataSourceVersion2Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Quality used for sample rate conversion
SrcQuality src_quality;
/// Mix buffer index for decoded samples
s16 output_index;
/// Flags to control decoding (see AudioCore::AudioRenderer::VoiceInfo::Flags)
u16 flags;
/// Wavebuffer sample rate
u32 sample_rate;
/// Pitch used for sample rate conversion
f32 pitch;
/// Target channel to read within the wavebuffer
s8 channel_index;
/// Number of channels within the wavebuffer
s8 channel_count;
/// Wavebuffers containing the wavebuffer address, context address, looping information etc
std::array<WaveBufferVersion2, MaxWaveBuffers> wave_buffers;
/// Voice state, updated each call and written back to game
CpuAddr voice_state;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,87 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <span>
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/data_source/decode.h"
#include "audio_core/renderer/command/data_source/pcm_int16.h"
namespace AudioCore::AudioRenderer {
void PcmInt16DataSourceVersion1Command::Dump(const ADSP::CommandListProcessor& processor,
std::string& string) {
string +=
fmt::format("PcmInt16DataSourceVersion1Command\n\toutput_index {:02X} channel {} "
"channel count {} source sample rate {} target sample rate {} src quality {}\n",
output_index, channel_index, channel_count, sample_rate,
processor.target_sample_rate, src_quality);
}
void PcmInt16DataSourceVersion1Command::Process(const ADSP::CommandListProcessor& processor) {
auto out_buffer = processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count);
DecodeFromWaveBuffersArgs args{
.sample_format{SampleFormat::PcmInt16},
.output{out_buffer},
.voice_state{reinterpret_cast<VoiceState*>(voice_state)},
.wave_buffers{wave_buffers},
.channel{channel_index},
.channel_count{channel_count},
.src_quality{src_quality},
.pitch{pitch},
.source_sample_rate{sample_rate},
.target_sample_rate{processor.target_sample_rate},
.sample_count{processor.sample_count},
.data_address{0},
.data_size{0},
.IsVoicePlayedSampleCountResetAtLoopPointSupported{(flags & 1) != 0},
.IsVoicePitchAndSrcSkippedSupported{(flags & 2) != 0},
};
DecodeFromWaveBuffers(*processor.memory, args);
}
bool PcmInt16DataSourceVersion1Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
void PcmInt16DataSourceVersion2Command::Dump(const ADSP::CommandListProcessor& processor,
std::string& string) {
string +=
fmt::format("PcmInt16DataSourceVersion2Command\n\toutput_index {:02X} channel {} "
"channel count {} source sample rate {} target sample rate {} src quality {}\n",
output_index, channel_index, channel_count, sample_rate,
processor.target_sample_rate, src_quality);
}
void PcmInt16DataSourceVersion2Command::Process(const ADSP::CommandListProcessor& processor) {
auto out_buffer = processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count);
DecodeFromWaveBuffersArgs args{
.sample_format{SampleFormat::PcmInt16},
.output{out_buffer},
.voice_state{reinterpret_cast<VoiceState*>(voice_state)},
.wave_buffers{wave_buffers},
.channel{channel_index},
.channel_count{channel_count},
.src_quality{src_quality},
.pitch{pitch},
.source_sample_rate{sample_rate},
.target_sample_rate{processor.target_sample_rate},
.sample_count{processor.sample_count},
.data_address{0},
.data_size{0},
.IsVoicePlayedSampleCountResetAtLoopPointSupported{(flags & 1) != 0},
.IsVoicePitchAndSrcSkippedSupported{(flags & 2) != 0},
};
DecodeFromWaveBuffers(*processor.memory, args);
}
bool PcmInt16DataSourceVersion2Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
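// [Editor's sketch] The Process() implementations above repeatedly slice the processor's flat
// mix_buffers span into one window of sample_count samples per buffer index. A minimal,
// standalone illustration of that indexing with std::span; the names below are illustrative
// and not part of yuzu.
#include <cstddef>
#include <cstdint>
#include <span>

inline std::span<std::int32_t> MixBufferView(std::span<std::int32_t> mix_buffers,
                                             std::size_t buffer_index,
                                             std::size_t sample_count) {
    // Equivalent to processor.mix_buffers.subspan(index * sample_count, sample_count).
    return mix_buffers.subspan(buffer_index * sample_count, sample_count);
}

// Usage: with 24 mix buffers of 240 samples each, buffer 2 covers samples [480, 720).
//   std::vector<std::int32_t> samples(24 * 240);
//   auto buffer2 = MixBufferView(samples, 2, 240);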
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,110 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "audio_core/common/wave_buffer.h"
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command to decode PCM s16-encoded version 1 wavebuffers
* into the output_index mix buffer.
*/
struct PcmInt16DataSourceVersion1Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Quality used for sample rate conversion
SrcQuality src_quality;
/// Mix buffer index for decoded samples
s16 output_index;
/// Flags to control decoding (see AudioCore::AudioRenderer::VoiceInfo::Flags)
u16 flags;
/// Wavebuffer sample rate
u32 sample_rate;
/// Pitch used for sample rate conversion
f32 pitch;
/// Target channel to read within the wavebuffer
s8 channel_index;
/// Number of channels within the wavebuffer
s8 channel_count;
/// Wavebuffers containing the wavebuffer address, context address, looping information etc
std::array<WaveBufferVersion2, MaxWaveBuffers> wave_buffers;
/// Voice state, updated each call and written back to game
CpuAddr voice_state;
};
/**
* AudioRenderer command to decode PCM s16-encoded version 2 wavebuffers
* into the output_index mix buffer.
*/
struct PcmInt16DataSourceVersion2Command : ICommand {
/**
* Print this command's information to a string.
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Quality used for sample rate conversion
SrcQuality src_quality;
/// Mix buffer index for decoded samples
s16 output_index;
/// Flags to control decoding (see AudioCore::AudioRenderer::VoiceInfo::Flags)
u16 flags;
/// Wavebuffer sample rate
u32 sample_rate;
/// Pitch used for sample rate conversion
f32 pitch;
/// Target channel to read within the wavebuffer
s8 channel_index;
/// Number of channels within the wavebuffer
s8 channel_count;
/// Wavebuffers containing the wavebuffer address, context address, looping information etc
std::array<WaveBufferVersion2, MaxWaveBuffers> wave_buffers;
/// Voice state, updated each call and written back to game
CpuAddr voice_state;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,207 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/aux_.h"
#include "audio_core/renderer/effect/effect_aux_info.h"
#include "core/memory.h"
namespace AudioCore::AudioRenderer {
/**
* Reset an AuxBuffer.
*
* @param memory - Core memory for writing.
* @param aux_info - Memory address pointing to the AuxInfo to reset.
*/
static void ResetAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr aux_info) {
if (aux_info == 0) {
LOG_ERROR(Service_Audio, "Aux info is 0!");
return;
}
auto info{reinterpret_cast<AuxInfo::AuxInfoDsp*>(memory.GetPointer(aux_info))};
info->read_offset = 0;
info->write_offset = 0;
info->total_sample_count = 0;
}
/**
* Write the given input mix buffer to the memory at send_buffer, and update send_info_ if
* update_count is set, to notify the game that an update happened.
*
* @param memory - Core memory for writing.
* @param send_info_ - Meta information for where to write the mix buffer.
* @param sample_count - Unused.
* @param send_buffer - Memory address to write the mix buffer to.
* @param count_max - Maximum number of samples in the receiving buffer.
* @param input - Input mix buffer to write.
* @param write_count_ - Number of samples to write.
* @param write_offset - Current offset to begin writing the receiving buffer at.
* @param update_count - If non-zero, send_info_ will be updated.
* @return Number of samples written.
*/
static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr send_info_,
[[maybe_unused]] u32 sample_count, const CpuAddr send_buffer,
const u32 count_max, std::span<const s32> input,
const u32 write_count_, const u32 write_offset,
const u32 update_count) {
if (write_count_ > count_max) {
LOG_ERROR(Service_Audio,
"write_count must be smaller than count_max! write_count {}, count_max {}",
write_count_, count_max);
return 0;
}
if (input.empty()) {
LOG_ERROR(Service_Audio, "input buffer is empty!");
return 0;
}
if (send_buffer == 0) {
LOG_ERROR(Service_Audio, "send_buffer is 0!");
return 0;
}
if (count_max == 0) {
return 0;
}
AuxInfo::AuxInfoDsp send_info{};
memory.ReadBlockUnsafe(send_info_, &send_info, sizeof(AuxInfo::AuxInfoDsp));
u32 target_write_offset{send_info.write_offset + write_offset};
if (target_write_offset > count_max || write_count_ == 0) {
return 0;
}
u32 write_count{write_count_};
u32 write_pos{0};
while (write_count > 0) {
u32 to_write{std::min(count_max - target_write_offset, write_count)};
if (to_write > 0) {
memory.WriteBlockUnsafe(send_buffer + target_write_offset * sizeof(s32),
&input[write_pos], to_write * sizeof(s32));
}
target_write_offset = (target_write_offset + to_write) % count_max;
write_count -= to_write;
write_pos += to_write;
}
if (update_count) {
send_info.write_offset = (send_info.write_offset + update_count) % count_max;
}
memory.WriteBlockUnsafe(send_info_, &send_info, sizeof(AuxInfo::AuxInfoDsp));
return write_count_;
}
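// [Editor's sketch] WriteAuxBufferDsp above performs a wrap-around (ring buffer) copy into game
// memory, splitting the write into at most two contiguous chunks at the wrap point and advancing
// the write offset modulo count_max. The same copy reduced to a plain local ring buffer, with no
// emulated-memory access; names are illustrative and not part of yuzu.
#include <algorithm>
#include <cstdint>
#include <span>
#include <vector>

inline std::uint32_t CircularWrite(std::vector<std::int32_t>& ring, std::uint32_t write_offset,
                                   std::span<const std::int32_t> input) {
    const auto count_max = static_cast<std::uint32_t>(ring.size());
    if (count_max == 0 || input.empty()) {
        return write_offset;
    }
    auto remaining = static_cast<std::uint32_t>(input.size());
    std::uint32_t read_pos = 0;
    while (remaining > 0) {
        // Copy up to the end of the ring, then wrap back to offset 0 for the rest.
        const std::uint32_t to_write = std::min(count_max - write_offset, remaining);
        std::copy_n(input.begin() + read_pos, to_write, ring.begin() + write_offset);
        write_offset = (write_offset + to_write) % count_max;
        remaining -= to_write;
        read_pos += to_write;
    }
    // The caller would store this back, as the code above does with AuxInfoDsp::write_offset.
    return write_offset;
}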
/**
* Read the given memory at return_buffer into the output mix buffer, and update return_info_ if
* update_count is set, to notify the game that an update happened.
*
* @param memory - Core memory for writing.
* @param return_info_ - Meta information for where to read the mix buffer.
* @param return_buffer - Memory address to read the samples from.
* @param count_max - Maximum number of samples in the receiving buffer.
* @param output - Output mix buffer which will receive the samples.
* @param count_ - Number of samples to read.
* @param read_offset - Current offset to begin reading the return_buffer at.
* @param update_count - If non-zero, return_info_ will be updated.
* @return Number of samples read.
*/
static u32 ReadAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr return_info_,
const CpuAddr return_buffer, const u32 count_max, std::span<s32> output,
const u32 count_, const u32 read_offset, const u32 update_count) {
if (count_max == 0) {
return 0;
}
if (count_ > count_max) {
LOG_ERROR(Service_Audio, "count must be smaller than count_max! count {}, count_max {}",
count_, count_max);
return 0;
}
if (output.empty()) {
LOG_ERROR(Service_Audio, "output buffer is empty!");
return 0;
}
if (return_buffer == 0) {
LOG_ERROR(Service_Audio, "return_buffer is 0!");
return 0;
}
AuxInfo::AuxInfoDsp return_info{};
memory.ReadBlockUnsafe(return_info_, &return_info, sizeof(AuxInfo::AuxInfoDsp));
u32 target_read_offset{return_info.read_offset + read_offset};
if (target_read_offset > count_max) {
return 0;
}
u32 read_count{count_};
u32 read_pos{0};
while (read_count > 0) {
u32 to_read{std::min(count_max - target_read_offset, read_count)};
if (to_read > 0) {
memory.ReadBlockUnsafe(return_buffer + target_read_offset * sizeof(s32),
&output[read_pos], to_read * sizeof(s32));
}
target_read_offset = (target_read_offset + to_read) % count_max;
read_count -= to_read;
read_pos += to_read;
}
if (update_count) {
return_info.read_offset = (return_info.read_offset + update_count) % count_max;
}
memory.WriteBlockUnsafe(return_info_, &return_info, sizeof(AuxInfo::AuxInfoDsp));
return count_;
}
void AuxCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("AuxCommand\n\tenabled {} input {:02X} output {:02X}\n", effect_enabled,
input, output);
}
void AuxCommand::Process(const ADSP::CommandListProcessor& processor) {
auto input_buffer{
processor.mix_buffers.subspan(input * processor.sample_count, processor.sample_count)};
auto output_buffer{
processor.mix_buffers.subspan(output * processor.sample_count, processor.sample_count)};
if (effect_enabled) {
WriteAuxBufferDsp(*processor.memory, send_buffer_info, processor.sample_count, send_buffer,
count_max, input_buffer, processor.sample_count, write_offset,
update_count);
auto read{ReadAuxBufferDsp(*processor.memory, return_buffer_info, return_buffer, count_max,
output_buffer, processor.sample_count, write_offset,
update_count)};
if (read != processor.sample_count) {
// Zero the samples that were not read back; the buffer holds s32, so the size is in bytes.
std::memset(&output_buffer[read], 0, (processor.sample_count - read) * sizeof(s32));
}
} else {
ResetAuxBufferDsp(*processor.memory, send_buffer_info);
ResetAuxBufferDsp(*processor.memory, return_buffer_info);
if (input != output) {
std::memcpy(output_buffer.data(), input_buffer.data(), output_buffer.size_bytes());
}
}
}
bool AuxCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,66 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command to read and write an auxiliary buffer, writing the input mix buffer to game
* memory, and reading into the output buffer from game memory.
*/
struct AuxCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer index
s16 input;
/// Output mix buffer index
s16 output;
/// Meta info for writing
CpuAddr send_buffer_info;
/// Meta info for reading
CpuAddr return_buffer_info;
/// Game memory write buffer
CpuAddr send_buffer;
/// Game memory read buffer
CpuAddr return_buffer;
/// Max samples to read/write
u32 count_max;
/// Current read/write offset
u32 write_offset;
/// Number of samples to update per call
u32 update_count;
/// Is this effect enabled?
bool effect_enabled;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,118 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/biquad_filter.h"
#include "audio_core/renderer/voice/voice_state.h"
namespace AudioCore::AudioRenderer {
/**
* Biquad filter float implementation.
*
* @param output - Output container for filtered samples.
* @param input - Input container for samples to be filtered.
* @param b - Feedforward coefficients.
* @param a - Feedback coefficients.
* @param state - State to track previous samples between calls.
* @param sample_count - Number of samples to process.
*/
void ApplyBiquadFilterFloat(std::span<s32> output, std::span<const s32> input,
std::array<s16, 3>& b_, std::array<s16, 2>& a_,
VoiceState::BiquadFilterState& state, const u32 sample_count) {
constexpr s64 min{std::numeric_limits<s32>::min()};
constexpr s64 max{std::numeric_limits<s32>::max()};
std::array<f64, 3> b{Common::FixedPoint<50, 14>::from_base(b_[0]).to_double(),
Common::FixedPoint<50, 14>::from_base(b_[1]).to_double(),
Common::FixedPoint<50, 14>::from_base(b_[2]).to_double()};
std::array<f64, 2> a{Common::FixedPoint<50, 14>::from_base(a_[0]).to_double(),
Common::FixedPoint<50, 14>::from_base(a_[1]).to_double()};
std::array<f64, 4> s{state.s0.to_double(), state.s1.to_double(), state.s2.to_double(),
state.s3.to_double()};
for (u32 i = 0; i < sample_count; i++) {
f64 in_sample{static_cast<f64>(input[i])};
auto sample{in_sample * b[0] + s[0] * b[1] + s[1] * b[2] + s[2] * a[0] + s[3] * a[1]};
output[i] = static_cast<s32>(std::clamp(static_cast<s64>(sample), min, max));
s[1] = s[0];
s[0] = in_sample;
s[3] = s[2];
s[2] = sample;
}
state.s0 = s[0];
state.s1 = s[1];
state.s2 = s[2];
state.s3 = s[3];
}
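// [Editor's sketch] ApplyBiquadFilterFloat above evaluates the direct-form-I biquad difference
// equation y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] + a0*y[n-1] + a1*y[n-2], where the feedback
// coefficients are stored pre-negated so they can be added. The same loop written over plain
// doubles, without the fixed-point coefficient decoding or the s32 clamp; names are illustrative.
#include <array>
#include <cstddef>
#include <span>

inline void BiquadDirectForm1(std::span<double> output, std::span<const double> input,
                              const std::array<double, 3>& b, const std::array<double, 2>& a,
                              std::array<double, 4>& state) {
    // state = {x[n-1], x[n-2], y[n-1], y[n-2]}
    for (std::size_t n = 0; n < input.size(); n++) {
        const double x = input[n];
        const double y =
            b[0] * x + b[1] * state[0] + b[2] * state[1] + a[0] * state[2] + a[1] * state[3];
        output[n] = y;
        state[1] = state[0]; // x[n-2] <- x[n-1]
        state[0] = x;        // x[n-1] <- x[n]
        state[3] = state[2]; // y[n-2] <- y[n-1]
        state[2] = y;        // y[n-1] <- y[n]
    }
}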
/**
* Biquad filter s32 implementation.
*
* @param output - Output container for filtered samples.
* @param input - Input container for samples to be filtered.
* @param b - Feedforward coefficients.
* @param a - Feedback coefficients.
* @param state - State to track previous samples between calls.
* @param sample_count - Number of samples to process.
*/
static void ApplyBiquadFilterInt(std::span<s32> output, std::span<const s32> input,
std::array<s16, 3>& b_, std::array<s16, 2>& a_,
VoiceState::BiquadFilterState& state, const u32 sample_count) {
constexpr s64 min{std::numeric_limits<s32>::min()};
constexpr s64 max{std::numeric_limits<s32>::max()};
std::array<Common::FixedPoint<50, 14>, 3> b{
Common::FixedPoint<50, 14>::from_base(b_[0]),
Common::FixedPoint<50, 14>::from_base(b_[1]),
Common::FixedPoint<50, 14>::from_base(b_[2]),
};
std::array<Common::FixedPoint<50, 14>, 2> a{
Common::FixedPoint<50, 14>::from_base(a_[0]),
Common::FixedPoint<50, 14>::from_base(a_[1]),
};
for (u32 i = 0; i < sample_count; i++) {
s64 in_sample{input[i]};
auto sample{in_sample * b[0] + state.s0};
const auto out_sample{std::clamp(sample.to_long(), min, max)};
output[i] = static_cast<s32>(out_sample);
state.s0 = state.s1 + b[1] * in_sample + a[0] * out_sample;
state.s1 = 0 + b[2] * in_sample + a[1] * out_sample;
}
}
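// [Editor's sketch] The s16 coefficients above are reinterpreted through
// Common::FixedPoint<50, 14>::from_base, i.e. as raw fixed-point values with what appears to be
// 14 fractional bits: value = raw / 2^14. That interpretation is an assumption drawn from the
// from_base/to_double usage above; a standalone illustration of the conversion:
#include <cstdint>

constexpr double FromQ14(std::int16_t raw) {
    return static_cast<double>(raw) / 16384.0; // 16384 == 2^14
}

constexpr std::int16_t ToQ14(double value) {
    return static_cast<std::int16_t>(value * 16384.0);
}

// Example: a coefficient of 0.5 is stored as 8192.
static_assert(ToQ14(0.5) == 8192);
static_assert(FromQ14(8192) == 0.5);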
void BiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format(
"BiquadFilterCommand\n\tinput {:02X} output {:02X} needs_init {} use_float_processing {}\n",
input, output, needs_init, use_float_processing);
}
void BiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) {
auto state_{reinterpret_cast<VoiceState::BiquadFilterState*>(state)};
if (needs_init) {
std::memset(state_, 0, sizeof(VoiceState::BiquadFilterState));
}
auto input_buffer{
processor.mix_buffers.subspan(input * processor.sample_count, processor.sample_count)};
auto output_buffer{
processor.mix_buffers.subspan(output * processor.sample_count, processor.sample_count)};
if (use_float_processing) {
ApplyBiquadFilterFloat(output_buffer, input_buffer, biquad.b, biquad.a, *state_,
processor.sample_count);
} else {
ApplyBiquadFilterInt(output_buffer, input_buffer, biquad.b, biquad.a, *state_,
processor.sample_count);
}
}
bool BiquadFilterCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,74 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/voice/voice_info.h"
#include "audio_core/renderer/voice/voice_state.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command for applying a biquad filter to the input mix buffer, saving the results to
* the output mix buffer.
*/
struct BiquadFilterCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer index
s16 input;
/// Output mix buffer index
s16 output;
/// Input parameters for biquad
VoiceInfo::BiquadFilterParameter biquad;
/// Biquad state, updated each call
CpuAddr state;
/// If true, reset the state
bool needs_init;
/// If true, use float processing rather than int
bool use_float_processing;
};
/**
* Biquad filter float implementation.
*
* @param output - Output container for filtered samples.
* @param input - Input container for samples to be filtered.
* @param b - Feedforward coefficients.
* @param a - Feedback coefficients.
* @param state - State to track previous samples.
* @param sample_count - Number of samples to process.
*/
void ApplyBiquadFilterFloat(std::span<s32> output, std::span<const s32> input,
std::array<s16, 3>& b, std::array<s16, 2>& a,
VoiceState::BiquadFilterState& state, const u32 sample_count);
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,142 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/capture.h"
#include "audio_core/renderer/effect/effect_aux_info.h"
#include "core/memory.h"
namespace AudioCore::AudioRenderer {
/**
* Reset an AuxBuffer.
*
* @param memory - Core memory for writing.
* @param aux_info - Memory address pointing to the AuxInfo to reset.
*/
static void ResetAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr aux_info) {
if (aux_info == 0) {
LOG_ERROR(Service_Audio, "Aux info is 0!");
return;
}
memory.Write32(VAddr(aux_info + offsetof(AuxInfo::AuxInfoDsp, read_offset)), 0);
memory.Write32(VAddr(aux_info + offsetof(AuxInfo::AuxInfoDsp, write_offset)), 0);
memory.Write32(VAddr(aux_info + offsetof(AuxInfo::AuxInfoDsp, total_sample_count)), 0);
}
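// [Editor's sketch] ResetAuxBufferDsp above clears individual AuxInfoDsp fields by writing 32-bit
// zeros at offsetof()-computed addresses rather than mapping the whole struct. The same pattern
// against a plain byte buffer; the struct and helpers below are illustrative, not part of yuzu.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct ExampleInfo {
    std::uint32_t read_offset;
    std::uint32_t write_offset;
    std::uint32_t total_sample_count;
};

inline void Write32At(std::vector<std::uint8_t>& memory, std::size_t address,
                      std::uint32_t value) {
    std::memcpy(memory.data() + address, &value, sizeof(value));
}

inline void ResetExampleInfo(std::vector<std::uint8_t>& memory, std::size_t base_address) {
    Write32At(memory, base_address + offsetof(ExampleInfo, read_offset), 0);
    Write32At(memory, base_address + offsetof(ExampleInfo, write_offset), 0);
    Write32At(memory, base_address + offsetof(ExampleInfo, total_sample_count), 0);
}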
/**
* Write the given input mix buffer to the memory at send_buffer, and update send_info_ if
* update_count is set, to notify the game that an update happened.
*
* @param memory - Core memory for writing.
* @param send_info_ - Header information for where to write the mix buffer.
* @param send_buffer - Memory address to write the mix buffer to.
* @param count_max - Maximum number of samples in the receiving buffer.
* @param input - Input mix buffer to write.
* @param write_count_ - Number of samples to write.
* @param write_offset - Current offset to begin writing the receiving buffer at.
* @param update_count - If non-zero, send_info_ will be updated.
* @return Number of samples written.
*/
static u32 WriteAuxBufferDsp(Core::Memory::Memory& memory, const CpuAddr send_info_,
const CpuAddr send_buffer, u32 count_max, std::span<const s32> input,
const u32 write_count_, const u32 write_offset,
const u32 update_count) {
if (write_count_ > count_max) {
LOG_ERROR(Service_Audio,
"write_count must be smaller than count_max! write_count {}, count_max {}",
write_count_, count_max);
return 0;
}
if (send_info_ == 0) {
LOG_ERROR(Service_Audio, "send_info is 0!");
return 0;
}
if (input.empty()) {
LOG_ERROR(Service_Audio, "input buffer is empty!");
return 0;
}
if (send_buffer == 0) {
LOG_ERROR(Service_Audio, "send_buffer is 0!");
return 0;
}
if (count_max == 0) {
return 0;
}
AuxInfo::AuxBufferInfo send_info{};
memory.ReadBlockUnsafe(send_info_, &send_info, sizeof(AuxInfo::AuxBufferInfo));
u32 target_write_offset{send_info.dsp_info.write_offset + write_offset};
if (target_write_offset > count_max || write_count_ == 0) {
return 0;
}
u32 write_count{write_count_};
u32 write_pos{0};
while (write_count > 0) {
u32 to_write{std::min(count_max - target_write_offset, write_count)};
if (to_write > 0) {
memory.WriteBlockUnsafe(send_buffer + target_write_offset * sizeof(s32),
&input[write_pos], to_write * sizeof(s32));
}
target_write_offset = (target_write_offset + to_write) % count_max;
write_count -= to_write;
write_pos += to_write;
}
if (update_count) {
const auto count_diff{send_info.dsp_info.total_sample_count -
send_info.cpu_info.total_sample_count};
if (count_diff >= count_max) {
auto dsp_lost_count{send_info.dsp_info.lost_sample_count + update_count};
if (dsp_lost_count - send_info.cpu_info.lost_sample_count <
send_info.dsp_info.lost_sample_count - send_info.cpu_info.lost_sample_count) {
dsp_lost_count = send_info.cpu_info.lost_sample_count - 1;
}
send_info.dsp_info.lost_sample_count = dsp_lost_count;
}
send_info.dsp_info.write_offset =
(send_info.dsp_info.write_offset + update_count + count_max) % count_max;
auto new_sample_count{send_info.dsp_info.total_sample_count + update_count};
if (new_sample_count - send_info.cpu_info.total_sample_count < count_diff) {
new_sample_count = send_info.cpu_info.total_sample_count - 1;
}
send_info.dsp_info.total_sample_count = new_sample_count;
}
memory.WriteBlockUnsafe(send_info_, &send_info, sizeof(AuxInfo::AuxBufferInfo));
return write_count_;
}
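// [Editor's sketch] The update_count block above tracks how far the DSP-side writer has run
// ahead of the CPU-side reader and accounts samples as lost once that gap reaches the ring
// capacity. A much-simplified sketch of that idea; it deliberately does not reproduce the exact
// wrap/underflow arithmetic above. Names are illustrative only, not part of yuzu.
#include <cstdint>

struct RingCounters {
    std::uint64_t produced; // total samples written by the producer (DSP side)
    std::uint64_t consumed; // total samples read by the consumer (CPU side)
    std::uint64_t lost;     // samples the consumer can never read back
};

inline void AccountProduced(RingCounters& counters, std::uint32_t new_samples,
                            std::uint32_t capacity) {
    counters.produced += new_samples;
    const std::uint64_t backlog = counters.produced - counters.consumed;
    if (backlog > capacity) {
        // Anything beyond the ring capacity has been overwritten before it was read.
        counters.lost += backlog - capacity;
        counters.consumed = counters.produced - capacity;
    }
}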
void CaptureCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("CaptureCommand\n\tenabled {} input {:02X} output {:02X}", effect_enabled,
input, output);
}
void CaptureCommand::Process(const ADSP::CommandListProcessor& processor) {
if (effect_enabled) {
auto input_buffer{
processor.mix_buffers.subspan(input * processor.sample_count, processor.sample_count)};
WriteAuxBufferDsp(*processor.memory, send_buffer_info, send_buffer, count_max, input_buffer,
processor.sample_count, write_offset, update_count);
} else {
ResetAuxBufferDsp(*processor.memory, send_buffer_info);
}
}
bool CaptureCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,62 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command for capturing a mix buffer. That is, writing it back to a given game memory
* address.
*/
struct CaptureCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer index
s16 input;
/// Output mix buffer index
s16 output;
/// Meta info for writing
CpuAddr send_buffer_info;
/// Game memory write buffer
CpuAddr send_buffer;
/// Max samples to read/write
u32 count_max;
/// Current read/write offset
u32 write_offset;
/// Number of samples to update per call
u32 update_count;
/// Is this effect enabled?
bool effect_enabled;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,244 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/delay.h"
namespace AudioCore::AudioRenderer {
/**
* Update the DelayInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
*/
static void SetDelayEffectParameter(const DelayInfo::ParameterVersion1& params,
DelayInfo::State& state) {
auto channel_spread{params.channel_spread};
state.feedback_gain = params.feedback_gain * 0.97998046875f;
state.delay_feedback_gain = state.feedback_gain * (1.0f - channel_spread);
if (params.channel_count == 4 || params.channel_count == 6) {
channel_spread >>= 1;
}
state.delay_feedback_cross_gain = channel_spread * state.feedback_gain;
state.lowpass_feedback_gain = params.lowpass_amount * 0.949951171875f;
state.lowpass_gain = 1.0f - state.lowpass_feedback_gain;
}
/**
* Initialize a new DelayInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
* @param workbuffer - Game-supplied memory for the state. (Unused)
*/
static void InitializeDelayEffect(const DelayInfo::ParameterVersion1& params,
DelayInfo::State& state,
[[maybe_unused]] const CpuAddr workbuffer) {
state = {};
for (u32 channel = 0; channel < params.channel_count; channel++) {
Common::FixedPoint<32, 32> sample_count_max{0.064f};
sample_count_max *= params.sample_rate.to_int_floor() * params.delay_time_max;
Common::FixedPoint<18, 14> delay_time{params.delay_time};
delay_time *= params.sample_rate / 1000;
Common::FixedPoint<32, 32> sample_count{delay_time};
if (sample_count > sample_count_max) {
sample_count = sample_count_max;
}
state.delay_lines[channel].sample_count_max = sample_count_max.to_int_floor();
state.delay_lines[channel].sample_count = sample_count.to_int_floor();
state.delay_lines[channel].buffer.resize(state.delay_lines[channel].sample_count, 0);
if (state.delay_lines[channel].buffer.size() == 0) {
state.delay_lines[channel].buffer.push_back(0);
}
state.delay_lines[channel].buffer_pos = 0;
state.delay_lines[channel].decay_rate = 1.0f;
}
SetDelayEffectParameter(params, state);
}
/**
* Delay effect implementation. Applies the delay, according to the parameters and current state,
* to the input mix buffers, saving the results to the output mix buffers.
*
* @param params - Input parameters to use.
* @param state - State to use, must be initialized (see InitializeDelayEffect).
* @param inputs - Input mix buffers to perform the delay on.
* @param outputs - Output mix buffers to receive the delayed samples.
* @param sample_count - Number of samples to process.
*/
template <size_t Channels>
static void ApplyDelay(const DelayInfo::ParameterVersion1& params, DelayInfo::State& state,
std::vector<std::span<const s32>>& inputs,
std::vector<std::span<s32>>& outputs, const u32 sample_count) {
for (u32 i = 0; i < sample_count; i++) {
std::array<Common::FixedPoint<50, 14>, Channels> input_samples{};
for (u32 channel = 0; channel < Channels; channel++) {
input_samples[channel] = inputs[channel][i] * 64;
}
std::array<Common::FixedPoint<50, 14>, Channels> delay_samples{};
for (u32 channel = 0; channel < Channels; channel++) {
delay_samples[channel] = state.delay_lines[channel].Read();
}
std::array<std::array<Common::FixedPoint<18, 14>, Channels>, Channels> matrix{};
if constexpr (Channels == 1) {
matrix = {{
{state.feedback_gain},
}};
} else if constexpr (Channels == 2) {
matrix = {{
{state.delay_feedback_gain, state.delay_feedback_cross_gain},
{state.delay_feedback_cross_gain, state.delay_feedback_gain},
}};
} else if constexpr (Channels == 4) {
matrix = {{
{state.delay_feedback_gain, state.delay_feedback_cross_gain,
state.delay_feedback_cross_gain, 0.0f},
{state.delay_feedback_cross_gain, state.delay_feedback_gain, 0.0f,
state.delay_feedback_cross_gain},
{state.delay_feedback_cross_gain, 0.0f, state.delay_feedback_gain,
state.delay_feedback_cross_gain},
{0.0f, state.delay_feedback_cross_gain, state.delay_feedback_cross_gain,
state.delay_feedback_gain},
}};
} else if constexpr (Channels == 6) {
matrix = {{
{state.delay_feedback_gain, 0.0f, 0.0f, 0.0f, state.delay_feedback_cross_gain,
state.delay_feedback_cross_gain},
{0.0f, state.delay_feedback_gain, 0.0f, state.delay_feedback_cross_gain,
state.delay_feedback_cross_gain, 0.0f},
{state.delay_feedback_cross_gain, 0.0f, state.delay_feedback_gain,
state.delay_feedback_cross_gain, 0.0f, 0.0f},
{0.0f, state.delay_feedback_cross_gain, state.delay_feedback_cross_gain,
state.delay_feedback_gain, 0.0f, 0.0f},
{state.delay_feedback_cross_gain, state.delay_feedback_cross_gain, 0.0f, 0.0f,
state.delay_feedback_gain, 0.0f},
{0.0f, 0.0f, 0.0f, 0.0f, 0.0f, params.feedback_gain},
}};
}
std::array<Common::FixedPoint<50, 14>, Channels> gained_samples{};
for (u32 channel = 0; channel < Channels; channel++) {
Common::FixedPoint<50, 14> delay{};
for (u32 j = 0; j < Channels; j++) {
delay += delay_samples[j] * matrix[j][channel];
}
gained_samples[channel] = input_samples[channel] * params.in_gain + delay;
}
for (u32 channel = 0; channel < Channels; channel++) {
state.lowpass_z[channel] = gained_samples[channel] * state.lowpass_gain +
state.lowpass_z[channel] * state.lowpass_feedback_gain;
state.delay_lines[channel].Write(state.lowpass_z[channel]);
}
for (u32 channel = 0; channel < Channels; channel++) {
outputs[channel][i] = (input_samples[channel] * params.dry_gain +
delay_samples[channel] * params.wet_gain)
.to_int_floor() /
64;
}
}
}
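// [Editor's sketch] The core of ApplyDelay above for the single-channel case: read the delay
// line, form the feedback term, low-pass it (the lowpass_z / lowpass_gain pair above is a
// one-pole filter in the feedback path), write it back, and mix dry + wet into the output. The
// *64 / 64 scaling above is fixed-point headroom and is omitted here; the delay-line type is an
// illustrative stand-in, since the real one is defined elsewhere. Plain floats, names illustrative.
#include <cstddef>
#include <span>
#include <vector>

struct MonoDelayLine {
    std::vector<float> buffer; // delay of buffer.size() samples
    std::size_t pos = 0;

    float Read() const { return buffer[pos]; }
    void Write(float sample) {
        buffer[pos] = sample;
        pos = (pos + 1) % buffer.size();
    }
};

inline void MonoFeedbackDelay(std::span<float> output, std::span<const float> input,
                              MonoDelayLine& line, float& lowpass_state, float feedback_gain,
                              float lowpass_gain, float lowpass_feedback_gain, float in_gain,
                              float dry_gain, float wet_gain) {
    for (std::size_t i = 0; i < input.size(); i++) {
        const float delayed = line.Read();
        // Feedback path: gained input plus the delayed signal scaled by the feedback gain,
        // smoothed by a one-pole lowpass before it re-enters the delay line.
        const float fed_back = input[i] * in_gain + delayed * feedback_gain;
        lowpass_state = fed_back * lowpass_gain + lowpass_state * lowpass_feedback_gain;
        line.Write(lowpass_state);
        // Output is the dry input plus the delayed (wet) signal.
        output[i] = input[i] * dry_gain + delayed * wet_gain;
    }
}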
/**
* Apply a delay effect if enabled, according to the parameters and current state, on the input mix
* buffers, saving the results to the output mix buffers.
*
* @param params - Input parameters to use.
* @param state - State to use, must be initialized (see InitializeDelayEffect).
* @param enabled - If enabled, delay will be applied, otherwise input is copied to output.
* @param inputs - Input mix buffers to perform the delay on.
* @param outputs - Output mix buffers to receive the delayed samples.
* @param sample_count - Number of samples to process.
*/
static void ApplyDelayEffect(const DelayInfo::ParameterVersion1& params, DelayInfo::State& state,
const bool enabled, std::vector<std::span<const s32>>& inputs,
std::vector<std::span<s32>>& outputs, const u32 sample_count) {
if (!IsChannelCountValid(params.channel_count)) {
LOG_ERROR(Service_Audio, "Invalid delay channels {}", params.channel_count);
return;
}
if (enabled) {
switch (params.channel_count) {
case 1:
ApplyDelay<1>(params, state, inputs, outputs, sample_count);
break;
case 2:
ApplyDelay<2>(params, state, inputs, outputs, sample_count);
break;
case 4:
ApplyDelay<4>(params, state, inputs, outputs, sample_count);
break;
case 6:
ApplyDelay<6>(params, state, inputs, outputs, sample_count);
break;
default:
for (u32 channel = 0; channel < params.channel_count; channel++) {
if (inputs[channel].data() != outputs[channel].data()) {
std::memcpy(outputs[channel].data(), inputs[channel].data(),
sample_count * sizeof(s32));
}
}
break;
}
} else {
for (u32 channel = 0; channel < params.channel_count; channel++) {
if (inputs[channel].data() != outputs[channel].data()) {
std::memcpy(outputs[channel].data(), inputs[channel].data(),
sample_count * sizeof(s32));
}
}
}
}
void DelayCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("DelayCommand\n\tenabled {} \n\tinputs: ", effect_enabled);
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", inputs[i]);
}
string += "\n\toutputs: ";
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", outputs[i]);
}
string += "\n";
}
void DelayCommand::Process(const ADSP::CommandListProcessor& processor) {
std::vector<std::span<const s32>> input_buffers(parameter.channel_count);
std::vector<std::span<s32>> output_buffers(parameter.channel_count);
for (s16 i = 0; i < parameter.channel_count; i++) {
input_buffers[i] = processor.mix_buffers.subspan(inputs[i] * processor.sample_count,
processor.sample_count);
output_buffers[i] = processor.mix_buffers.subspan(outputs[i] * processor.sample_count,
processor.sample_count);
}
auto state_{reinterpret_cast<DelayInfo::State*>(state)};
if (effect_enabled) {
if (parameter.state == DelayInfo::ParameterState::Updating) {
SetDelayEffectParameter(parameter, *state_);
} else if (parameter.state == DelayInfo::ParameterState::Initialized) {
InitializeDelayEffect(parameter, *state_, workbuffer);
}
}
ApplyDelayEffect(parameter, *state_, effect_enabled, input_buffers, output_buffers,
processor.sample_count);
}
bool DelayCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,60 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/effect/effect_delay_info.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command for a delay effect. Delays the input mix buffers according to the
* parameters and state; the outputs receive the delayed samples.
*/
struct DelayCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer offsets for each channel
std::array<s8, MaxChannels> inputs;
/// Output mix buffer offsets for each channel
std::array<s8, MaxChannels> outputs;
/// Input parameters
DelayInfo::ParameterVersion1 parameter;
/// State, updated each call
CpuAddr state;
/// Game-supplied workbuffer (Unused)
CpuAddr workbuffer;
/// Is this effect enabled?
bool effect_enabled;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,428 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numbers>
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/i3dl2_reverb.h"
namespace AudioCore::AudioRenderer {
constexpr std::array<f32, I3dl2ReverbInfo::MaxDelayLines> MinDelayLineTimes{
5.0f,
6.0f,
13.0f,
14.0f,
};
constexpr std::array<f32, I3dl2ReverbInfo::MaxDelayLines> MaxDelayLineTimes{
45.7042007446f,
82.7817001343f,
149.938293457f,
271.575805664f,
};
constexpr std::array<f32, I3dl2ReverbInfo::MaxDelayLines> Decay0MaxDelayLineTimes{17.0f, 13.0f,
9.0f, 7.0f};
constexpr std::array<f32, I3dl2ReverbInfo::MaxDelayLines> Decay1MaxDelayLineTimes{19.0f, 11.0f,
10.0f, 6.0f};
constexpr std::array<f32, I3dl2ReverbInfo::MaxDelayTaps> EarlyTapTimes{
0.0171360000968f,
0.0591540001333f,
0.161733001471f,
0.390186011791f,
0.425262004137f,
0.455410987139f,
0.689737021923f,
0.74590998888f,
0.833844006062f,
0.859502017498f,
0.0f,
0.0750240013003f,
0.168788000941f,
0.299901008606f,
0.337442994118f,
0.371903002262f,
0.599011003971f,
0.716741025448f,
0.817858994007f,
0.85166400671f,
};
constexpr std::array<f32, I3dl2ReverbInfo::MaxDelayTaps> EarlyGains{
0.67096f, 0.61027f, 1.0f, 0.3568f, 0.68361f, 0.65978f, 0.51939f,
0.24712f, 0.45945f, 0.45021f, 0.64196f, 0.54879f, 0.92925f, 0.3827f,
0.72867f, 0.69794f, 0.5464f, 0.24563f, 0.45214f, 0.44042f};
/**
* Update the I3dl2ReverbInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
* @param reset - If enabled, the state buffers will be reset. Only set this on initialize.
*/
static void UpdateI3dl2ReverbEffectParameter(const I3dl2ReverbInfo::ParameterVersion1& params,
I3dl2ReverbInfo::State& state, const bool reset) {
const auto pow_10 = [](f32 val) -> f32 {
return (val >= 0.0f) ? 1.0f : (val <= -5.3f) ? 0.0f : std::pow(10.0f, val);
};
const auto sin = [](f32 degrees) -> f32 {
return std::sin(degrees * std::numbers::pi_v<f32> / 180.0f);
};
const auto cos = [](f32 degrees) -> f32 {
return std::cos(degrees * std::numbers::pi_v<f32> / 180.0f);
};
Common::FixedPoint<50, 14> delay{static_cast<f32>(params.sample_rate) / 1000.0f};
Common::FixedPoint<50, 14> early_gain{
std::min(params.room_gain + params.reflection_gain, 5000.0f) / 2000.0f};
state.early_gain = pow_10(early_gain.to_float());
Common::FixedPoint<50, 14> late_gain{std::min(params.room_gain + params.reverb_gain, 5000.0f) /
2000.0f};
state.late_gain = pow_10(late_gain.to_float());
Common::FixedPoint<50, 14> hf_gain{pow_10(params.room_HF_gain / 2000.0f)};
if (hf_gain >= 1.0f) {
state.lowpass_1 = 0.0f;
state.lowpass_2 = 1.0f;
} else {
const auto reference_hf{(params.reference_HF * 256.0f) /
static_cast<f32>(params.sample_rate)};
const Common::FixedPoint<50, 14> a{1.0f - hf_gain.to_float()};
const Common::FixedPoint<50, 14> b{2.0f + (-cos(reference_hf) * (hf_gain * 2.0f))};
const Common::FixedPoint<50, 14> c{
std::sqrt(std::pow(b.to_float(), 2.0f) + (std::pow(a.to_float(), 2.0f) * -4.0f))};
state.lowpass_1 = std::min(((b - c) / (a * 2.0f)).to_float(), 0.99723f);
state.lowpass_2 = 1.0f - state.lowpass_1;
}
state.early_to_late_taps =
(((params.reflection_delay + params.late_reverb_delay_time) * 1000.0f) * delay).to_int();
state.last_reverb_echo = params.late_reverb_diffusion * 0.6f * 0.01f;
for (u32 i = 0; i < I3dl2ReverbInfo::MaxDelayLines; i++) {
auto curr_delay{
((MinDelayLineTimes[i] + (params.late_reverb_density / 100.0f) *
(MaxDelayLineTimes[i] - MinDelayLineTimes[i])) *
delay)
.to_int()};
state.fdn_delay_lines[i].SetDelay(curr_delay);
const auto a{
(static_cast<f32>(state.fdn_delay_lines[i].delay + state.decay_delay_lines0[i].delay +
state.decay_delay_lines1[i].delay) *
-60.0f) /
(params.late_reverb_decay_time * static_cast<f32>(params.sample_rate))};
const auto b{a / params.late_reverb_HF_decay_ratio};
const auto c{
cos(((params.reference_HF * 0.5f) * 128.0f) / static_cast<f32>(params.sample_rate)) /
sin(((params.reference_HF * 0.5f) * 128.0f) / static_cast<f32>(params.sample_rate))};
const auto d{pow_10((b - a) / 40.0f)};
const auto e{pow_10((b + a) / 40.0f) * 0.7071f};
state.lowpass_coeff[i][0] = ((c * d + 1.0f) * e) / (c + d);
state.lowpass_coeff[i][1] = ((1.0f - (c * d)) * e) / (c + d);
state.lowpass_coeff[i][2] = (c - d) / (c + d);
state.decay_delay_lines0[i].wet_gain = state.last_reverb_echo;
state.decay_delay_lines1[i].wet_gain = state.last_reverb_echo * -0.9f;
}
if (reset) {
state.shelf_filter.fill(0.0f);
state.lowpass_0 = 0.0f;
for (u32 i = 0; i < I3dl2ReverbInfo::MaxDelayLines; i++) {
std::ranges::fill(state.fdn_delay_lines[i].buffer, 0);
std::ranges::fill(state.decay_delay_lines0[i].buffer, 0);
std::ranges::fill(state.decay_delay_lines1[i].buffer, 0);
}
std::ranges::fill(state.center_delay_line.buffer, 0);
std::ranges::fill(state.early_delay_line.buffer, 0);
}
const auto reflection_time{(params.late_reverb_delay_time * 0.9998f + 0.02f) * 1000.0f};
const auto reflection_delay{params.reflection_delay * 1000.0f};
for (u32 i = 0; i < I3dl2ReverbInfo::MaxDelayTaps; i++) {
auto length{((reflection_delay + reflection_time * EarlyTapTimes[i]) * delay).to_int()};
if (length >= state.early_delay_line.max_delay) {
length = state.early_delay_line.max_delay;
}
state.early_tap_steps[i] = length;
}
}
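// [Editor's sketch] The pow_10(x / 2000.0f) pattern above converts a gain given in millibels
// (hundredths of a decibel, the unit I3DL2 parameters are commonly specified in) to a linear
// amplitude: x mB = x/100 dB, and 10^((x/100)/20) = 10^(x/2000). The -5.3 cutoff in pow_10
// above just treats very small gains as silence. The millibel interpretation is an assumption
// drawn from the I3DL2 convention; the helper below is illustrative only.
#include <cmath>

inline float MillibelsToAmplitude(float millibels) {
    // e.g. -600 mB = -6 dB -> ~0.501;  0 mB -> 1.0;  -10000 mB -> ~1e-5.
    return std::pow(10.0f, millibels / 2000.0f);
}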
/**
* Initialize a new I3dl2ReverbInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
* @param workbuffer - Game-supplied memory for the state. (Unused)
*/
static void InitializeI3dl2ReverbEffect(const I3dl2ReverbInfo::ParameterVersion1& params,
I3dl2ReverbInfo::State& state, const CpuAddr workbuffer) {
state = {};
Common::FixedPoint<50, 14> delay{static_cast<f32>(params.sample_rate) / 1000};
for (u32 i = 0; i < I3dl2ReverbInfo::MaxDelayLines; i++) {
auto fdn_delay_time{(MaxDelayLineTimes[i] * delay).to_uint_floor()};
state.fdn_delay_lines[i].Initialize(fdn_delay_time);
auto decay0_delay_time{(Decay0MaxDelayLineTimes[i] * delay).to_uint_floor()};
state.decay_delay_lines0[i].Initialize(decay0_delay_time);
auto decay1_delay_time{(Decay1MaxDelayLineTimes[i] * delay).to_uint_floor()};
state.decay_delay_lines1[i].Initialize(decay1_delay_time);
}
const auto center_delay_time{(5 * delay).to_uint_floor()};
state.center_delay_line.Initialize(center_delay_time);
const auto early_delay_time{(400 * delay).to_uint_floor()};
state.early_delay_line.Initialize(early_delay_time);
UpdateI3dl2ReverbEffectParameter(params, state, true);
}
/**
* Pass-through the effect, copying input to output directly, with no reverb applied.
*
* @param inputs - Array of input mix buffers to copy.
* @param outputs - Array of output mix buffers to receive copy.
* @param channel_count - Number of channels in inputs and outputs.
* @param sample_count - Number of samples within each channel (unused).
*/
static void ApplyI3dl2ReverbEffectBypass(std::span<std::span<const s32>> inputs,
std::span<std::span<s32>> outputs, const u32 channel_count,
[[maybe_unused]] const u32 sample_count) {
for (u32 i = 0; i < channel_count; i++) {
if (inputs[i].data() != outputs[i].data()) {
std::memcpy(outputs[i].data(), inputs[i].data(), outputs[i].size_bytes());
}
}
}
/**
* Tick the delay lines, reading and returning their current output, and writing a new decaying
* sample (mix).
*
* @param decay0 - The first decay line.
* @param decay1 - The second decay line.
* @param fdn - Feedback delay network.
* @param mix - The new calculated sample to be written and decayed.
* @return The next delayed and decayed sample.
*/
static Common::FixedPoint<50, 14> Axfx2AllPassTick(I3dl2ReverbInfo::I3dl2DelayLine& decay0,
I3dl2ReverbInfo::I3dl2DelayLine& decay1,
I3dl2ReverbInfo::I3dl2DelayLine& fdn,
const Common::FixedPoint<50, 14> mix) {
auto val{decay0.Read()};
auto mixed{mix - (val * decay0.wet_gain)};
auto out{decay0.Tick(mixed) + (mixed * decay0.wet_gain)};
val = decay1.Read();
mixed = out - (val * decay1.wet_gain);
out = decay1.Tick(mixed) + (mixed * decay1.wet_gain);
fdn.Tick(out);
return out;
}
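// [Editor's sketch] Axfx2AllPassTick above has the shape of a Schroeder all-pass section: the
// delay-line input is the dry sample minus the delayed sample scaled by the gain, and the output
// is the delayed sample plus that same feed scaled by the gain, which keeps the magnitude
// response flat while smearing phase (this is what builds reverb density). This assumes
// I3dl2DelayLine::Tick writes its argument and returns the delayed sample, as its pairing with
// Read() above suggests. A standalone single-section sketch in plain floats; names illustrative.
#include <cstddef>
#include <vector>

struct AllPassSection {
    std::vector<float> buffer; // delay of buffer.size() samples
    std::size_t pos = 0;
    float gain = 0.6f;

    float Tick(float input) {
        const float delayed = buffer[pos];
        const float feed = input - delayed * gain; // what goes back into the delay line
        buffer[pos] = feed;
        pos = (pos + 1) % buffer.size();
        return delayed + feed * gain; // all-pass output
    }
};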
/**
* Implementation. Apply an I3DL2 reverb, according to the current state, to the input mix buffers,
* saving the results to the output mix buffers.
*
* @tparam Channels - Number of channels to process (1, 2, 4, or 6). Inputs/outputs should have
*                    this many buffers.
* @param state - State to use, must be initialized (see InitializeI3dl2ReverbEffect).
* @param inputs - Input mix buffers to perform the reverb on.
* @param outputs - Output mix buffers to receive the reverbed samples.
* @param sample_count - Number of samples to process.
*/
template <size_t Channels>
static void ApplyI3dl2ReverbEffect(I3dl2ReverbInfo::State& state,
std::span<std::span<const s32>> inputs,
std::span<std::span<s32>> outputs, const u32 sample_count) {
constexpr std::array<u8, I3dl2ReverbInfo::MaxDelayTaps> OutTapIndexes1Ch{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
constexpr std::array<u8, I3dl2ReverbInfo::MaxDelayTaps> OutTapIndexes2Ch{
0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1,
};
constexpr std::array<u8, I3dl2ReverbInfo::MaxDelayTaps> OutTapIndexes4Ch{
0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 3, 3, 3,
};
constexpr std::array<u8, I3dl2ReverbInfo::MaxDelayTaps> OutTapIndexes6Ch{
4, 0, 0, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2,
};
std::span<const u8> tap_indexes{};
if constexpr (Channels == 1) {
tap_indexes = OutTapIndexes1Ch;
} else if constexpr (Channels == 2) {
tap_indexes = OutTapIndexes2Ch;
} else if constexpr (Channels == 4) {
tap_indexes = OutTapIndexes4Ch;
} else if constexpr (Channels == 6) {
tap_indexes = OutTapIndexes6Ch;
}
for (u32 i = 0; i < sample_count; i++) {
Common::FixedPoint<50, 14> early_to_late_tap{
state.early_delay_line.TapOut(state.early_to_late_taps)};
std::array<Common::FixedPoint<50, 14>, Channels> output_samples{};
for (u32 early_tap = 0; early_tap < I3dl2ReverbInfo::MaxDelayTaps; early_tap++) {
output_samples[tap_indexes[early_tap]] +=
state.early_delay_line.TapOut(state.early_tap_steps[early_tap]) *
EarlyGains[early_tap];
if constexpr (Channels == 6) {
output_samples[5] +=
state.early_delay_line.TapOut(state.early_tap_steps[early_tap]) *
EarlyGains[early_tap];
}
}
Common::FixedPoint<50, 14> current_sample{};
for (u32 channel = 0; channel < Channels; channel++) {
current_sample += inputs[channel][i];
}
state.lowpass_0 =
(current_sample * state.lowpass_2 + state.lowpass_0 * state.lowpass_1).to_float();
state.early_delay_line.Tick(state.lowpass_0);
for (u32 channel = 0; channel < Channels; channel++) {
output_samples[channel] *= state.early_gain;
}
std::array<Common::FixedPoint<50, 14>, I3dl2ReverbInfo::MaxDelayLines> filtered_samples{};
for (u32 delay_line = 0; delay_line < I3dl2ReverbInfo::MaxDelayLines; delay_line++) {
filtered_samples[delay_line] =
state.fdn_delay_lines[delay_line].Read() * state.lowpass_coeff[delay_line][0] +
state.shelf_filter[delay_line];
state.shelf_filter[delay_line] =
(filtered_samples[delay_line] * state.lowpass_coeff[delay_line][2] +
state.fdn_delay_lines[delay_line].Read() * state.lowpass_coeff[delay_line][1])
.to_float();
}
const std::array<Common::FixedPoint<50, 14>, I3dl2ReverbInfo::MaxDelayLines> mix_matrix{
filtered_samples[1] + filtered_samples[2] + early_to_late_tap * state.late_gain,
-filtered_samples[0] - filtered_samples[3] + early_to_late_tap * state.late_gain,
filtered_samples[0] - filtered_samples[3] + early_to_late_tap * state.late_gain,
filtered_samples[1] - filtered_samples[2] + early_to_late_tap * state.late_gain,
};
std::array<Common::FixedPoint<50, 14>, I3dl2ReverbInfo::MaxDelayLines> allpass_samples{};
for (u32 delay_line = 0; delay_line < I3dl2ReverbInfo::MaxDelayLines; delay_line++) {
allpass_samples[delay_line] = Axfx2AllPassTick(
state.decay_delay_lines0[delay_line], state.decay_delay_lines1[delay_line],
state.fdn_delay_lines[delay_line], mix_matrix[delay_line]);
}
const auto out_channels{std::min(Channels, size_t(4))};
for (u32 channel = 0; channel < out_channels; channel++) {
auto out_sample{output_samples[channel] + allpass_samples[channel] +
state.dry_gain * static_cast<f32>(inputs[channel][i])};
outputs[channel][i] =
static_cast<s32>(std::clamp(out_sample.to_float(), -8388600.0f, 8388600.0f));
}
if constexpr (Channels == 6) {
auto center{
state.center_delay_line.Tick((allpass_samples[2] - allpass_samples[3]) * 0.5f)};
auto out_sample{static_cast<f32>(inputs[4][i]) * state.dry_gain +
output_samples[4] * state.early_gain + center};
outputs[4][i] =
static_cast<s32>(std::clamp(out_sample.to_float(), -8388600.0f, 8388600.0f));
out_sample = static_cast<f32>(inputs[5][i]) * state.dry_gain +
output_samples[5] * state.early_gain + allpass_samples[3];
outputs[5][i] =
static_cast<s32>(std::clamp(out_sample.to_float(), -8388600.0f, 8388600.0f));
}
}
}
/**
* Apply an I3DL2 reverb if enabled, according to the current state, on the input mix buffers,
* saving the results to the output mix buffers.
*
* @param params - Input parameters to use.
* @param state - State to use, must be initialized (see InitializeI3dl2ReverbEffect).
* @param enabled - If enabled, the reverb will be applied, otherwise input is copied to output.
* @param inputs - Input mix buffers to perform the reverb on.
* @param outputs - Output mix buffers to receive the reverbed samples.
* @param sample_count - Number of samples to process.
*/
static void ApplyI3dl2ReverbEffect(const I3dl2ReverbInfo::ParameterVersion1& params,
I3dl2ReverbInfo::State& state, const bool enabled,
std::span<std::span<const s32>> inputs,
std::span<std::span<s32>> outputs, const u32 sample_count) {
if (enabled) {
switch (params.channel_count) {
case 0:
return;
case 1:
ApplyI3dl2ReverbEffect<1>(state, inputs, outputs, sample_count);
break;
case 2:
ApplyI3dl2ReverbEffect<2>(state, inputs, outputs, sample_count);
break;
case 4:
ApplyI3dl2ReverbEffect<4>(state, inputs, outputs, sample_count);
break;
case 6:
ApplyI3dl2ReverbEffect<6>(state, inputs, outputs, sample_count);
break;
default:
ApplyI3dl2ReverbEffectBypass(inputs, outputs, params.channel_count, sample_count);
break;
}
} else {
ApplyI3dl2ReverbEffectBypass(inputs, outputs, params.channel_count, sample_count);
}
}
void I3dl2ReverbCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("I3dl2ReverbCommand\n\tenabled {} \n\tinputs: ", effect_enabled);
for (u32 i = 0; i < parameter.channel_count; i++) {
string += fmt::format("{:02X}, ", inputs[i]);
}
string += "\n\toutputs: ";
for (u32 i = 0; i < parameter.channel_count; i++) {
string += fmt::format("{:02X}, ", outputs[i]);
}
string += "\n";
}
void I3dl2ReverbCommand::Process(const ADSP::CommandListProcessor& processor) {
std::vector<std::span<const s32>> input_buffers(parameter.channel_count);
std::vector<std::span<s32>> output_buffers(parameter.channel_count);
for (u32 i = 0; i < parameter.channel_count; i++) {
input_buffers[i] = processor.mix_buffers.subspan(inputs[i] * processor.sample_count,
processor.sample_count);
output_buffers[i] = processor.mix_buffers.subspan(outputs[i] * processor.sample_count,
processor.sample_count);
}
auto state_{reinterpret_cast<I3dl2ReverbInfo::State*>(state)};
if (effect_enabled) {
if (parameter.state == I3dl2ReverbInfo::ParameterState::Updating) {
UpdateI3dl2ReverbEffectParameter(parameter, *state_, false);
} else if (parameter.state == I3dl2ReverbInfo::ParameterState::Initialized) {
InitializeI3dl2ReverbEffect(parameter, *state_, workbuffer);
}
}
ApplyI3dl2ReverbEffect(parameter, *state_, effect_enabled, input_buffers, output_buffers,
processor.sample_count);
}
bool I3dl2ReverbCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,60 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/effect/effect_i3dl2_info.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command for an I3DL2 reverb effect. Applies a reverb to the input mix buffers
* according to the I3DL2 spec; the outputs receive the results.
*/
struct I3dl2ReverbCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer offsets for each channel
std::array<s16, MaxChannels> inputs;
/// Output mix buffer offsets for each channel
std::array<s16, MaxChannels> outputs;
/// Input parameters
I3dl2ReverbInfo::ParameterVersion1 parameter;
/// State, updated each call
CpuAddr state;
/// Game-supplied workbuffer (Unused)
CpuAddr workbuffer;
/// Is this effect enabled?
bool effect_enabled;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,202 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/light_limiter.h"
namespace AudioCore::AudioRenderer {
/**
* Update the LightLimiterInfo state according to the given parameters.
* A no-op.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
*/
static void UpdateLightLimiterEffectParameter(const LightLimiterInfo::ParameterVersion2& params,
LightLimiterInfo::State& state) {}
/**
* Initialize a new LightLimiterInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
* @param workbuffer - Game-supplied memory for the state. (Unused)
*/
static void InitializeLightLimiterEffect(const LightLimiterInfo::ParameterVersion2& params,
LightLimiterInfo::State& state, const CpuAddr workbuffer) {
state = {};
state.samples_average.fill(0.0f);
state.compression_gain.fill(1.0f);
state.look_ahead_sample_offsets.fill(0);
for (u32 i = 0; i < params.channel_count; i++) {
state.look_ahead_sample_buffers[i].resize(params.look_ahead_samples_max, 0.0f);
}
}
/**
* Apply a light limiter effect if enabled, according to the current state, on the input mix
* buffers, saving the results to the output mix buffers.
*
* @param params - Input parameters to use.
* @param state - State to use, must be initialized (see InitializeLightLimiterEffect).
* @param enabled - If enabled, limiter will be applied, otherwise input is copied to output.
* @param inputs - Input mix buffers to perform the limiter on.
* @param outputs - Output mix buffers to receive the limited samples.
* @param sample_count - Number of samples to process.
* @param statistics - Optional output statistics, only used with version 2.
*/
static void ApplyLightLimiterEffect(const LightLimiterInfo::ParameterVersion2& params,
LightLimiterInfo::State& state, const bool enabled,
std::vector<std::span<const s32>>& inputs,
std::vector<std::span<s32>>& outputs, const u32 sample_count,
LightLimiterInfo::StatisticsInternal* statistics) {
constexpr s64 min{std::numeric_limits<s32>::min()};
constexpr s64 max{std::numeric_limits<s32>::max()};
if (enabled) {
if (statistics && params.statistics_reset_required) {
for (u32 i = 0; i < params.channel_count; i++) {
statistics->channel_compression_gain_min[i] = 1.0f;
statistics->channel_max_sample[i] = 0;
}
}
for (u32 sample_index = 0; sample_index < sample_count; sample_index++) {
for (u32 channel = 0; channel < params.channel_count; channel++) {
auto sample{Common::FixedPoint<49, 15>(inputs[channel][sample_index]) *
params.input_gain};
auto abs_sample{sample};
if (sample < 0.0f) {
abs_sample = -sample;
}
auto coeff{abs_sample > state.samples_average[channel] ? params.attack_coeff
: params.release_coeff};
state.samples_average[channel] +=
((abs_sample - state.samples_average[channel]) * coeff).to_float();
auto above_threshold{state.samples_average[channel] > params.threshold};
auto attenuation{above_threshold ? params.threshold / state.samples_average[channel]
: 1.0f};
coeff = attenuation < state.compression_gain[channel] ? params.attack_coeff
: params.release_coeff;
state.compression_gain[channel] +=
(attenuation - state.compression_gain[channel]) * coeff;
auto lookahead_sample{
state.look_ahead_sample_buffers[channel]
[state.look_ahead_sample_offsets[channel]]};
state.look_ahead_sample_buffers[channel][state.look_ahead_sample_offsets[channel]] =
sample;
state.look_ahead_sample_offsets[channel] =
(state.look_ahead_sample_offsets[channel] + 1) % params.look_ahead_samples_min;
outputs[channel][sample_index] = static_cast<s32>(std::clamp(
(lookahead_sample * state.compression_gain[channel] * params.output_gain)
.to_long(),
min, max));
if (statistics) {
statistics->channel_max_sample[channel] =
std::max(statistics->channel_max_sample[channel], abs_sample.to_float());
statistics->channel_compression_gain_min[channel] =
std::min(statistics->channel_compression_gain_min[channel],
state.compression_gain[channel].to_float());
}
}
}
} else {
for (u32 i = 0; i < params.channel_count; i++) {
if (params.inputs[i] != params.outputs[i]) {
std::memcpy(outputs[i].data(), inputs[i].data(), outputs[i].size_bytes());
}
}
}
}
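// Illustrative sketch: the per-channel level envelope and the compression gain above are both
// smoothed with the same one-pole filter, y += (x - y) * coeff, selecting attack_coeff when
// moving toward more compression (level rising, or gain falling) and release_coeff otherwise.
// A minimal plain-float version of the level-envelope step (hypothetical helper, not used by
// the command itself):
[[maybe_unused]] static float FollowLevel(const float average, const float abs_sample,
                                          const float attack_coeff, const float release_coeff) {
    const float coeff{abs_sample > average ? attack_coeff : release_coeff};
    return average + (abs_sample - average) * coeff;
}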
void LightLimiterVersion1Command::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("LightLimiterVersion1Command\n\tinputs: ");
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", inputs[i]);
}
string += "\n\toutputs: ";
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", outputs[i]);
}
string += "\n";
}
void LightLimiterVersion1Command::Process(const ADSP::CommandListProcessor& processor) {
std::vector<std::span<const s32>> input_buffers(parameter.channel_count);
std::vector<std::span<s32>> output_buffers(parameter.channel_count);
for (u32 i = 0; i < parameter.channel_count; i++) {
input_buffers[i] = processor.mix_buffers.subspan(inputs[i] * processor.sample_count,
processor.sample_count);
output_buffers[i] = processor.mix_buffers.subspan(outputs[i] * processor.sample_count,
processor.sample_count);
}
auto state_{reinterpret_cast<LightLimiterInfo::State*>(state)};
if (effect_enabled) {
if (parameter.state == LightLimiterInfo::ParameterState::Updating) {
UpdateLightLimiterEffectParameter(parameter, *state_);
} else if (parameter.state == LightLimiterInfo::ParameterState::Initialized) {
InitializeLightLimiterEffect(parameter, *state_, workbuffer);
}
}
LightLimiterInfo::StatisticsInternal* statistics{nullptr};
ApplyLightLimiterEffect(parameter, *state_, effect_enabled, input_buffers, output_buffers,
processor.sample_count, statistics);
}
bool LightLimiterVersion1Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
void LightLimiterVersion2Command::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("LightLimiterVersion2Command\n\tinputs: \n");
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", inputs[i]);
}
string += "\n\toutputs: ";
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", outputs[i]);
}
string += "\n";
}
void LightLimiterVersion2Command::Process(const ADSP::CommandListProcessor& processor) {
auto state_{reinterpret_cast<LightLimiterInfo::State*>(state)};
std::vector<std::span<const s32>> input_buffers(parameter.channel_count);
std::vector<std::span<s32>> output_buffers(parameter.channel_count);
for (u32 i = 0; i < parameter.channel_count; i++) {
input_buffers[i] = processor.mix_buffers.subspan(inputs[i] * processor.sample_count,
processor.sample_count);
output_buffers[i] = processor.mix_buffers.subspan(outputs[i] * processor.sample_count,
processor.sample_count);
}
if (effect_enabled) {
if (parameter.state == LightLimiterInfo::ParameterState::Updating) {
UpdateLightLimiterEffectParameter(parameter, *state_);
} else if (parameter.state == LightLimiterInfo::ParameterState::Initialized) {
InitializeLightLimiterEffect(parameter, *state_, workbuffer);
}
}
auto statistics{reinterpret_cast<LightLimiterInfo::StatisticsInternal*>(result_state)};
ApplyLightLimiterEffect(parameter, *state_, effect_enabled, input_buffers, output_buffers,
processor.sample_count, statistics);
}
bool LightLimiterVersion2Command::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,103 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/effect/effect_light_limiter_info.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command for limiting volume between a high and low threshold.
* Version 1.
*/
struct LightLimiterVersion1Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer offsets for each channel
std::array<s16, MaxChannels> inputs;
/// Output mix buffer offsets for each channel
std::array<s16, MaxChannels> outputs;
/// Input parameters
LightLimiterInfo::ParameterVersion2 parameter;
/// State, updated each call
CpuAddr state;
/// Game-supplied workbuffer (Unused)
CpuAddr workbuffer;
/// Is this effect enabled?
bool effect_enabled;
};
/**
* AudioRenderer command for limiting volume between a high and low threshold.
* Version 2 with output statistics.
*/
struct LightLimiterVersion2Command : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
 * @return True if the command is valid, otherwise false.
 */
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer offsets for each channel
std::array<s16, MaxChannels> inputs;
/// Output mix buffer offsets for each channel
std::array<s16, MaxChannels> outputs;
/// Input parameters
LightLimiterInfo::ParameterVersion2 parameter;
/// State, updated each call
CpuAddr state;
/// Game-supplied workbuffer (Unused)
CpuAddr workbuffer;
/// Optional statistics, sent back to the sysmodule
CpuAddr result_state;
/// Is this effect enabled?
bool effect_enabled;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,45 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/biquad_filter.h"
#include "audio_core/renderer/command/effect/multi_tap_biquad_filter.h"
namespace AudioCore::AudioRenderer {
void MultiTapBiquadFilterCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format(
"MultiTapBiquadFilterCommand\n\tinput {:02X}\n\toutput {:02X}\n\tneeds_init ({}, {})\n",
input, output, needs_init[0], needs_init[1]);
}
void MultiTapBiquadFilterCommand::Process(const ADSP::CommandListProcessor& processor) {
if (filter_tap_count > MaxBiquadFilters) {
LOG_ERROR(Service_Audio, "Too many filter taps! {}", filter_tap_count);
filter_tap_count = MaxBiquadFilters;
}
auto input_buffer{
processor.mix_buffers.subspan(input * processor.sample_count, processor.sample_count)};
auto output_buffer{
processor.mix_buffers.subspan(output * processor.sample_count, processor.sample_count)};
// TODO: Fix this, currently just applies the filter to the input twice,
// and doesn't chain the biquads together at all.
for (u32 i = 0; i < filter_tap_count; i++) {
auto state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])};
if (needs_init[i]) {
std::memset(state, 0, sizeof(VoiceState::BiquadFilterState));
}
ApplyBiquadFilterFloat(output_buffer, input_buffer, biquads[i].b, biquads[i].a, *state,
processor.sample_count);
}
}
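// Illustrative sketch of the chaining described in the TODO above (an assumption, not the
// upstream fix): each tap would filter the previous tap's output rather than re-filtering the
// original input, e.g. by tracking the current source buffer:
//
//     auto source{input_buffer};
//     for (u32 i = 0; i < filter_tap_count; i++) {
//         auto tap_state{reinterpret_cast<VoiceState::BiquadFilterState*>(states[i])};
//         if (needs_init[i]) {
//             std::memset(tap_state, 0, sizeof(VoiceState::BiquadFilterState));
//         }
//         ApplyBiquadFilterFloat(output_buffer, source, biquads[i].b, biquads[i].a, *tap_state,
//                                processor.sample_count);
//         source = output_buffer;
//     }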
bool MultiTapBiquadFilterCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,59 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/voice/voice_info.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
* AudioRenderer command for applying multiple biquads at once.
*/
struct MultiTapBiquadFilterCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer index
s16 input;
/// Output mix buffer index
s16 output;
/// Biquad parameters
std::array<VoiceInfo::BiquadFilterParameter, MaxBiquadFilters> biquads;
/// Biquad states, updated each call
std::array<CpuAddr, MaxBiquadFilters> states;
/// If each biquad needs initialisation
std::array<bool, MaxBiquadFilters> needs_init;
/// Number of active biquads
u8 filter_tap_count;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,433 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <numbers>
#include <ranges>
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/effect/reverb.h"
namespace AudioCore::AudioRenderer {
constexpr std::array<f32, ReverbInfo::MaxDelayLines> FdnMaxDelayLineTimes = {
53.9532470703125f,
79.19256591796875f,
116.23876953125f,
170.61529541015625f,
};
constexpr std::array<f32, ReverbInfo::MaxDelayLines> DecayMaxDelayLineTimes = {
7.0f,
9.0f,
13.0f,
17.0f,
};
constexpr std::array<std::array<f32, ReverbInfo::MaxDelayTaps + 1>, ReverbInfo::NumEarlyModes>
EarlyDelayTimes = {
{{0.000000f, 3.500000f, 2.799988f, 3.899963f, 2.699951f, 13.399963f, 7.899963f, 8.399963f,
9.899963f, 12.000000f, 12.500000f},
{0.000000f, 11.799988f, 5.500000f, 11.199951f, 10.399963f, 38.099976f, 22.199951f,
29.599976f, 21.199951f, 24.799988f, 40.000000f},
{0.000000f, 41.500000f, 20.500000f, 41.299988f, 0.000000f, 29.500000f, 33.799988f,
45.199951f, 46.799988f, 0.000000f, 50.000000f},
{33.099976f, 43.299988f, 22.799988f, 37.899963f, 14.899963f, 35.299988f, 17.899963f,
34.199951f, 0.000000f, 43.299988f, 50.000000f},
{0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f,
0.000000f, 0.000000f, 0.000000f}},
};
constexpr std::array<std::array<f32, ReverbInfo::MaxDelayTaps>, ReverbInfo::NumEarlyModes>
EarlyDelayGains = {{
{0.699951f, 0.679993f, 0.699951f, 0.679993f, 0.699951f, 0.679993f, 0.699951f, 0.679993f,
0.679993f, 0.679993f},
{0.699951f, 0.679993f, 0.699951f, 0.679993f, 0.699951f, 0.679993f, 0.679993f, 0.679993f,
0.679993f, 0.679993f},
{0.500000f, 0.699951f, 0.699951f, 0.679993f, 0.500000f, 0.679993f, 0.679993f, 0.699951f,
0.679993f, 0.000000f},
{0.929993f, 0.919983f, 0.869995f, 0.859985f, 0.939941f, 0.809998f, 0.799988f, 0.769958f,
0.759949f, 0.649963f},
{0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f, 0.000000f,
0.000000f, 0.000000f},
}};
constexpr std::array<std::array<f32, ReverbInfo::MaxDelayLines>, ReverbInfo::NumLateModes>
FdnDelayTimes = {{
{53.953247f, 79.192566f, 116.238770f, 130.615295f},
{53.953247f, 79.192566f, 116.238770f, 170.615295f},
{5.000000f, 10.000000f, 5.000000f, 10.000000f},
{47.029968f, 71.000000f, 103.000000f, 170.000000f},
{53.953247f, 79.192566f, 116.238770f, 170.615295f},
}};
constexpr std::array<std::array<f32, ReverbInfo::MaxDelayLines>, ReverbInfo::NumLateModes>
DecayDelayTimes = {{
{7.000000f, 9.000000f, 13.000000f, 17.000000f},
{7.000000f, 9.000000f, 13.000000f, 17.000000f},
{1.000000f, 1.000000f, 1.000000f, 1.000000f},
{7.000000f, 7.000000f, 13.000000f, 9.000000f},
{7.000000f, 9.000000f, 13.000000f, 17.000000f},
}};
/**
* Update the ReverbInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
*/
static void UpdateReverbEffectParameter(const ReverbInfo::ParameterVersion2& params,
ReverbInfo::State& state) {
const auto pow_10 = [](f32 val) -> f32 {
return (val >= 0.0f) ? 1.0f : (val <= -5.3f) ? 0.0f : std::pow(10.0f, val);
};
const auto cos = [](f32 degrees) -> f32 {
return std::cos(degrees * std::numbers::pi_v<f32> / 180.0f);
};
static bool unk_initialized{false};
static Common::FixedPoint<50, 14> unk_value{};
const auto sample_rate{Common::FixedPoint<50, 14>::from_base(params.sample_rate)};
const auto pre_delay_time{Common::FixedPoint<50, 14>::from_base(params.pre_delay)};
for (u32 i = 0; i < ReverbInfo::MaxDelayTaps; i++) {
auto early_delay{
((pre_delay_time + EarlyDelayTimes[params.early_mode][i]) * sample_rate).to_int()};
early_delay = std::min(early_delay, state.pre_delay_line.sample_count_max);
state.early_delay_times[i] = early_delay + 1;
state.early_gains[i] = Common::FixedPoint<50, 14>::from_base(params.early_gain) *
EarlyDelayGains[params.early_mode][i];
}
if (params.channel_count == 2) {
state.early_gains[4] *= 0.5f;
state.early_gains[5] *= 0.5f;
}
auto pre_time{
((pre_delay_time + EarlyDelayTimes[params.early_mode][10]) * sample_rate).to_int()};
state.pre_delay_time = std::min(pre_time, state.pre_delay_line.sample_count_max);
if (!unk_initialized) {
unk_value = cos((1280.0f / sample_rate).to_float());
unk_initialized = true;
}
for (u32 i = 0; i < ReverbInfo::MaxDelayLines; i++) {
const auto fdn_delay{(FdnDelayTimes[params.late_mode][i] * sample_rate).to_int()};
state.fdn_delay_lines[i].sample_count =
std::min(fdn_delay, state.fdn_delay_lines[i].sample_count_max);
state.fdn_delay_lines[i].buffer_end =
&state.fdn_delay_lines[i].buffer[state.fdn_delay_lines[i].sample_count - 1];
const auto decay_delay{(DecayDelayTimes[params.late_mode][i] * sample_rate).to_int()};
state.decay_delay_lines[i].sample_count =
std::min(decay_delay, state.decay_delay_lines[i].sample_count_max);
state.decay_delay_lines[i].buffer_end =
&state.decay_delay_lines[i].buffer[state.decay_delay_lines[i].sample_count - 1];
state.decay_delay_lines[i].decay =
0.5999755859375f * (1.0f - Common::FixedPoint<50, 14>::from_base(params.colouration));
auto a{(Common::FixedPoint<50, 14>(state.fdn_delay_lines[i].sample_count_max) +
state.decay_delay_lines[i].sample_count_max) *
-3};
auto b{a / (Common::FixedPoint<50, 14>::from_base(params.decay_time) * sample_rate)};
Common::FixedPoint<50, 14> c{0.0f};
Common::FixedPoint<50, 14> d{0.0f};
auto hf_decay_ratio{Common::FixedPoint<50, 14>::from_base(params.high_freq_decay_ratio)};
if (hf_decay_ratio > 0.99493408203125f) {
c = 0.0f;
d = 1.0f;
} else {
const auto e{
pow_10(((((1.0f / hf_decay_ratio) - 1.0f) * 2) / 100 * (b / 10)).to_float())};
const auto f{1.0f - e};
const auto g{2.0f - (unk_value * e * 2)};
const auto h{std::sqrt(std::pow(g.to_float(), 2.0f) - (std::pow(f, 2.0f) * 4))};
c = (g - h) / (f * 2.0f);
d = 1.0f - c;
}
state.hf_decay_prev_gain[i] = c;
state.hf_decay_gain[i] = pow_10((b / 1000).to_float()) * d * 0.70709228515625f;
state.prev_feedback_output[i] = 0;
}
}
/**
* Initialize a new ReverbInfo state according to the given parameters.
*
* @param params - Input parameters to update the state.
* @param state - State to be updated.
* @param workbuffer - Game-supplied memory for the state. (Unused)
* @param long_size_pre_delay_supported - Use a longer pre-delay time before reverb begins.
*/
static void InitializeReverbEffect(const ReverbInfo::ParameterVersion2& params,
ReverbInfo::State& state, const CpuAddr workbuffer,
const bool long_size_pre_delay_supported) {
state = {};
auto delay{Common::FixedPoint<50, 14>::from_base(params.sample_rate)};
for (u32 i = 0; i < ReverbInfo::MaxDelayLines; i++) {
auto fdn_delay_time{(FdnMaxDelayLineTimes[i] * delay).to_uint_floor()};
state.fdn_delay_lines[i].Initialize(fdn_delay_time, 1.0f);
auto decay_delay_time{(DecayMaxDelayLineTimes[i] * delay).to_uint_floor()};
state.decay_delay_lines[i].Initialize(decay_delay_time, 0.0f);
}
const auto pre_delay{long_size_pre_delay_supported ? 350.0f : 150.0f};
const auto pre_delay_line{(pre_delay * delay).to_uint_floor()};
state.pre_delay_line.Initialize(pre_delay_line, 1.0f);
const auto center_delay_time{(5 * delay).to_uint_floor()};
state.center_delay_line.Initialize(center_delay_time, 1.0f);
UpdateReverbEffectParameter(params, state);
for (u32 i = 0; i < ReverbInfo::MaxDelayLines; i++) {
std::ranges::fill(state.fdn_delay_lines[i].buffer, 0);
std::ranges::fill(state.decay_delay_lines[i].buffer, 0);
}
std::ranges::fill(state.center_delay_line.buffer, 0);
std::ranges::fill(state.pre_delay_line.buffer, 0);
}
/**
* Pass-through the effect, copying input to output directly, with no reverb applied.
*
* @param inputs - Array of input mix buffers to copy.
* @param outputs - Array of output mix buffers to receive copy.
* @param channel_count - Number of channels in inputs and outputs.
* @param sample_count - Number of samples within each channel.
*/
static void ApplyReverbEffectBypass(std::span<std::span<const s32>> inputs,
std::span<std::span<s32>> outputs, const u32 channel_count,
const u32 sample_count) {
for (u32 i = 0; i < channel_count; i++) {
if (inputs[i].data() != outputs[i].data()) {
std::memcpy(outputs[i].data(), inputs[i].data(), outputs[i].size_bytes());
}
}
}
/**
* Tick the delay lines, reading and returning their current output, and writing a new decaying
* sample (mix).
*
* @param decay - The decay line.
* @param fdn - Feedback delay network.
* @param mix - The new calculated sample to be written and decayed.
* @return The next delayed and decayed sample.
*/
static Common::FixedPoint<50, 14> Axfx2AllPassTick(ReverbInfo::ReverbDelayLine& decay,
ReverbInfo::ReverbDelayLine& fdn,
const Common::FixedPoint<50, 14> mix) {
const auto val{decay.Read()};
const auto mixed{mix - (val * decay.decay)};
const auto out{decay.Tick(mixed) + (mixed * decay.decay)};
fdn.Tick(out);
return out;
}
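// Illustrative sketch (assuming ReverbDelayLine::Tick() stores the new sample and returns the
// delayed one): the mixed/out computation above is the classic Schroeder all-pass section,
//     w[n] = x[n] - g * w[n-D]
//     y[n] = w[n-D] + g * w[n]
// with g = decay.decay and D the decay line's current length. A plain-float standalone version,
// with the delay line modelled as a raw circular buffer (hypothetical helper):
[[maybe_unused]] static float AllPassTickFloat(float* delay_buffer, const u32 delay_length,
                                               u32& index, const float gain, const float x) {
    const float delayed{delay_buffer[index]}; // w[n-D]
    const float w{x - gain * delayed};        // w[n]
    delay_buffer[index] = w;
    index = (index + 1) % delay_length;
    return delayed + gain * w;                // y[n]
}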
/**
 * Implementation of the reverb. Apply a reverb according to the current state, on the input mix
 * buffers, saving the results to the output mix buffers.
 *
 * @tparam Channels - Number of channels to process (1-6).
 *                    Inputs/outputs should have this many buffers.
* @param params - Input parameters to update the state.
* @param state - State to use, must be initialized (see InitializeReverbEffect).
* @param inputs - Input mix buffers to perform the reverb on.
* @param outputs - Output mix buffers to receive the reverbed samples.
* @param sample_count - Number of samples to process.
*/
template <size_t Channels>
static void ApplyReverbEffect(const ReverbInfo::ParameterVersion2& params, ReverbInfo::State& state,
std::vector<std::span<const s32>>& inputs,
std::vector<std::span<s32>>& outputs, const u32 sample_count) {
constexpr std::array<u8, ReverbInfo::MaxDelayTaps> OutTapIndexes1Ch{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
constexpr std::array<u8, ReverbInfo::MaxDelayTaps> OutTapIndexes2Ch{
0, 0, 1, 1, 0, 1, 0, 0, 1, 1,
};
constexpr std::array<u8, ReverbInfo::MaxDelayTaps> OutTapIndexes4Ch{
0, 0, 1, 1, 0, 1, 2, 2, 3, 3,
};
constexpr std::array<u8, ReverbInfo::MaxDelayTaps> OutTapIndexes6Ch{
0, 0, 1, 1, 4, 4, 2, 2, 3, 3,
};
std::span<const u8> tap_indexes{};
if constexpr (Channels == 1) {
tap_indexes = OutTapIndexes1Ch;
} else if constexpr (Channels == 2) {
tap_indexes = OutTapIndexes2Ch;
} else if constexpr (Channels == 4) {
tap_indexes = OutTapIndexes4Ch;
} else if constexpr (Channels == 6) {
tap_indexes = OutTapIndexes6Ch;
}
for (u32 sample_index = 0; sample_index < sample_count; sample_index++) {
std::array<Common::FixedPoint<50, 14>, Channels> output_samples{};
for (u32 early_tap = 0; early_tap < ReverbInfo::MaxDelayTaps; early_tap++) {
const auto sample{state.pre_delay_line.TapOut(state.early_delay_times[early_tap]) *
state.early_gains[early_tap]};
output_samples[tap_indexes[early_tap]] += sample;
if constexpr (Channels == 6) {
output_samples[5] += sample;
}
}
if constexpr (Channels == 6) {
output_samples[5] *= 0.2f;
}
Common::FixedPoint<50, 14> input_sample{};
for (u32 channel = 0; channel < Channels; channel++) {
input_sample += inputs[channel][sample_index];
}
input_sample *= 64;
input_sample *= Common::FixedPoint<50, 14>::from_base(params.base_gain);
state.pre_delay_line.Write(input_sample);
for (u32 i = 0; i < ReverbInfo::MaxDelayLines; i++) {
state.prev_feedback_output[i] =
state.prev_feedback_output[i] * state.hf_decay_prev_gain[i] +
state.fdn_delay_lines[i].Read() * state.hf_decay_gain[i];
}
Common::FixedPoint<50, 14> pre_delay_sample{
state.pre_delay_line.Read() * Common::FixedPoint<50, 14>::from_base(params.late_gain)};
std::array<Common::FixedPoint<50, 14>, ReverbInfo::MaxDelayLines> mix_matrix{
state.prev_feedback_output[2] + state.prev_feedback_output[1] + pre_delay_sample,
-state.prev_feedback_output[0] - state.prev_feedback_output[3] + pre_delay_sample,
state.prev_feedback_output[0] - state.prev_feedback_output[3] + pre_delay_sample,
state.prev_feedback_output[1] - state.prev_feedback_output[2] + pre_delay_sample,
};
std::array<Common::FixedPoint<50, 14>, ReverbInfo::MaxDelayLines> out_line_samples{};
for (u32 i = 0; i < ReverbInfo::MaxDelayLines; i++) {
out_line_samples[i] = Axfx2AllPassTick(state.decay_delay_lines[i],
state.fdn_delay_lines[i], mix_matrix[i]);
}
const auto dry_gain{Common::FixedPoint<50, 14>::from_base(params.dry_gain)};
const auto wet_gain{Common::FixedPoint<50, 14>::from_base(params.wet_gain)};
const auto out_channels{std::min(Channels, size_t(4))};
for (u32 channel = 0; channel < out_channels; channel++) {
auto in_sample{inputs[channel][sample_index] * dry_gain};
auto out_sample{((output_samples[channel] + out_line_samples[channel]) * wet_gain) /
64};
outputs[channel][sample_index] = (in_sample + out_sample).to_int();
}
if constexpr (Channels == 6) {
auto center{
state.center_delay_line.Tick((out_line_samples[2] - out_line_samples[3]) * 0.5f)};
auto in_sample{inputs[4][sample_index] * dry_gain};
auto out_sample{((output_samples[4] + center) * wet_gain) / 64};
outputs[4][sample_index] = (in_sample + out_sample).to_int();
in_sample = inputs[5][sample_index] * dry_gain;
out_sample = ((output_samples[5] + out_line_samples[3]) * wet_gain) / 64;
outputs[5][sample_index] = (in_sample + out_sample).to_int();
}
}
}
/**
* Apply a Reverb if enabled, according to the current state, on the input mix buffers,
* saving the results to the output mix buffers.
*
* @param params - Input parameters to use.
* @param state - State to use, must be initialized (see InitializeReverbEffect).
 * @param enabled - If enabled, the reverb will be applied, otherwise input is copied to output.
 * @param inputs - Input mix buffers to perform the reverb on.
* @param outputs - Output mix buffers to receive the reverbed samples.
* @param sample_count - Number of samples to process.
*/
static void ApplyReverbEffect(const ReverbInfo::ParameterVersion2& params, ReverbInfo::State& state,
const bool enabled, std::vector<std::span<const s32>>& inputs,
std::vector<std::span<s32>>& outputs, const u32 sample_count) {
if (enabled) {
switch (params.channel_count) {
case 0:
return;
case 1:
ApplyReverbEffect<1>(params, state, inputs, outputs, sample_count);
break;
case 2:
ApplyReverbEffect<2>(params, state, inputs, outputs, sample_count);
break;
case 4:
ApplyReverbEffect<4>(params, state, inputs, outputs, sample_count);
break;
case 6:
ApplyReverbEffect<6>(params, state, inputs, outputs, sample_count);
break;
default:
ApplyReverbEffectBypass(inputs, outputs, params.channel_count, sample_count);
break;
}
} else {
ApplyReverbEffectBypass(inputs, outputs, params.channel_count, sample_count);
}
}
void ReverbCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format(
"ReverbCommand\n\tenabled {} long_size_pre_delay_supported {}\n\tinputs: ", effect_enabled,
long_size_pre_delay_supported);
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", inputs[i]);
}
string += "\n\toutputs: ";
for (u32 i = 0; i < MaxChannels; i++) {
string += fmt::format("{:02X}, ", outputs[i]);
}
string += "\n";
}
void ReverbCommand::Process(const ADSP::CommandListProcessor& processor) {
std::vector<std::span<const s32>> input_buffers(parameter.channel_count);
std::vector<std::span<s32>> output_buffers(parameter.channel_count);
for (u32 i = 0; i < parameter.channel_count; i++) {
input_buffers[i] = processor.mix_buffers.subspan(inputs[i] * processor.sample_count,
processor.sample_count);
output_buffers[i] = processor.mix_buffers.subspan(outputs[i] * processor.sample_count,
processor.sample_count);
}
auto state_{reinterpret_cast<ReverbInfo::State*>(state)};
if (effect_enabled) {
if (parameter.state == ReverbInfo::ParameterState::Updating) {
UpdateReverbEffectParameter(parameter, *state_);
} else if (parameter.state == ReverbInfo::ParameterState::Initialized) {
InitializeReverbEffect(parameter, *state_, workbuffer, long_size_pre_delay_supported);
}
}
ApplyReverbEffect(parameter, *state_, effect_enabled, input_buffers, output_buffers,
processor.sample_count);
}
bool ReverbCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,62 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "audio_core/renderer/effect/effect_reverb_info.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
 * AudioRenderer command for a Reverb effect. Applies a reverb to the input mix buffers; the
 * output mix buffers receive the results.
*/
struct ReverbCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer offsets for each channel
std::array<s16, MaxChannels> inputs;
/// Output mix buffer offsets for each channel
std::array<s16, MaxChannels> outputs;
/// Input parameters
ReverbInfo::ParameterVersion2 parameter;
/// State, updated each call
CpuAddr state;
/// Game-supplied workbuffer (Unused)
CpuAddr workbuffer;
/// Is this effect enabled?
bool effect_enabled;
/// Is a longer pre-delay time supported?
bool long_size_pre_delay_supported;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,92 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "audio_core/common/common.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
enum class CommandId : u8 {
/* 0x00 */ Invalid,
/* 0x01 */ DataSourcePcmInt16Version1,
/* 0x02 */ DataSourcePcmInt16Version2,
/* 0x03 */ DataSourcePcmFloatVersion1,
/* 0x04 */ DataSourcePcmFloatVersion2,
/* 0x05 */ DataSourceAdpcmVersion1,
/* 0x06 */ DataSourceAdpcmVersion2,
/* 0x07 */ Volume,
/* 0x08 */ VolumeRamp,
/* 0x09 */ BiquadFilter,
/* 0x0A */ Mix,
/* 0x0B */ MixRamp,
/* 0x0C */ MixRampGrouped,
/* 0x0D */ DepopPrepare,
/* 0x0E */ DepopForMixBuffers,
/* 0x0F */ Delay,
/* 0x10 */ Upsample,
/* 0x11 */ DownMix6chTo2ch,
/* 0x12 */ Aux,
/* 0x13 */ DeviceSink,
/* 0x14 */ CircularBufferSink,
/* 0x15 */ Reverb,
/* 0x16 */ I3dl2Reverb,
/* 0x17 */ Performance,
/* 0x18 */ ClearMixBuffer,
/* 0x19 */ CopyMixBuffer,
/* 0x1A */ LightLimiterVersion1,
/* 0x1B */ LightLimiterVersion2,
/* 0x1C */ MultiTapBiquadFilter,
/* 0x1D */ Capture,
};
constexpr u32 CommandMagic{0xCAFEBABE};
/**
* A command, generated by the host, and processed by the ADSP's AudioRenderer.
*/
struct ICommand {
virtual ~ICommand() = default;
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
virtual void Dump(const ADSP::CommandListProcessor& processor, std::string& string) = 0;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
virtual void Process(const ADSP::CommandListProcessor& processor) = 0;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
virtual bool Verify(const ADSP::CommandListProcessor& processor) = 0;
/// Command magic 0xCAFEBABE
u32 magic{};
/// Command enabled
bool enabled{};
/// Type of this command (see CommandId)
CommandId type{};
/// Size of this command
s16 size{};
/// Estimated processing time for this command
u32 estimated_process_time{};
/// Node id of the voice or mix this command was generated from
u32 node_id{};
};
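/**
 * Illustrative sketch (hypothetical helper, an assumption for illustration): every command
 * carries CommandMagic and its own size in this header, so a consumer can sanity-check a
 * command before dispatching on its type.
 */
inline bool HasValidCommandMagic(const ICommand& command) {
    return command.magic == CommandMagic && command.size > 0;
}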
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,24 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <string>
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/mix/clear_mix.h"
namespace AudioCore::AudioRenderer {
void ClearMixBufferCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("ClearMixBufferCommand\n");
}
void ClearMixBufferCommand::Process(const ADSP::CommandListProcessor& processor) {
memset(processor.mix_buffers.data(), 0, processor.mix_buffers.size_bytes());
}
bool ClearMixBufferCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,45 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
 * AudioRenderer command for clearing the mix buffers.
* Used at the start of each command list.
*/
struct ClearMixBufferCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
};
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,27 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "audio_core/renderer/adsp/command_list_processor.h"
#include "audio_core/renderer/command/mix/copy_mix.h"
namespace AudioCore::AudioRenderer {
void CopyMixBufferCommand::Dump([[maybe_unused]] const ADSP::CommandListProcessor& processor,
std::string& string) {
string += fmt::format("CopyMixBufferCommand\n\tinput {:02X} output {:02X}\n", input_index,
output_index);
}
void CopyMixBufferCommand::Process(const ADSP::CommandListProcessor& processor) {
auto output{processor.mix_buffers.subspan(output_index * processor.sample_count,
processor.sample_count)};
auto input{processor.mix_buffers.subspan(input_index * processor.sample_count,
processor.sample_count)};
std::memcpy(output.data(), input.data(), processor.sample_count * sizeof(s32));
}
bool CopyMixBufferCommand::Verify(const ADSP::CommandListProcessor& processor) {
return true;
}
} // namespace AudioCore::AudioRenderer

View file

@ -0,0 +1,49 @@
// SPDX-FileCopyrightText: Copyright 2022 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <string>
#include "audio_core/renderer/command/icommand.h"
#include "common/common_types.h"
namespace AudioCore::AudioRenderer {
namespace ADSP {
class CommandListProcessor;
}
/**
 * AudioRenderer command for copying a mix buffer from input to output.
*/
struct CopyMixBufferCommand : ICommand {
/**
* Print this command's information to a string.
*
* @param processor - The CommandListProcessor processing this command.
* @param string - The string to print into.
*/
void Dump(const ADSP::CommandListProcessor& processor, std::string& string) override;
/**
* Process this command.
*
* @param processor - The CommandListProcessor processing this command.
*/
void Process(const ADSP::CommandListProcessor& processor) override;
/**
* Verify this command's data is valid.
*
* @param processor - The CommandListProcessor processing this command.
* @return True if the command is valid, otherwise false.
*/
bool Verify(const ADSP::CommandListProcessor& processor) override;
/// Input mix buffer index
s16 input_index;
/// Output mix buffer index
s16 output_index;
};
} // namespace AudioCore::AudioRenderer

Some files were not shown because too many files have changed in this diff