SDL_emscriptenaudio.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356
  1. /*
  2. Simple DirectMedia Layer
  3. Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>
  4. This software is provided 'as-is', without any express or implied
  5. warranty. In no event will the authors be held liable for any damages
  6. arising from the use of this software.
  7. Permission is granted to anyone to use this software for any purpose,
  8. including commercial applications, and to alter it and redistribute it
  9. freely, subject to the following restrictions:
  10. 1. The origin of this software must not be misrepresented; you must not
  11. claim that you wrote the original software. If you use this software
  12. in a product, an acknowledgment in the product documentation would be
  13. appreciated but is not required.
  14. 2. Altered source versions must be plainly marked as such, and must not be
  15. misrepresented as being the original software.
  16. 3. This notice may not be removed or altered from any source distribution.
  17. */
  18. #include "SDL_internal.h"
  19. #ifdef SDL_AUDIO_DRIVER_EMSCRIPTEN
  20. #include "../SDL_sysaudio.h"
  21. #include "SDL_emscriptenaudio.h"
  22. #include <emscripten/emscripten.h>
  23. // just turn off clang-format for this whole file, this INDENT_OFF stuff on
  24. // each EM_ASM section is ugly.
  25. /* *INDENT-OFF* */ // clang-format off
// Hand SDL the device's mix buffer; EMSCRIPTENAUDIO_PlayDevice later pushes
// its contents into the current Web Audio output buffer.
static Uint8 *EMSCRIPTENAUDIO_GetDeviceBuf(SDL_AudioDevice *device, int *buffer_size)
{
    // *buffer_size is left untouched: the buffer was sized to device->buffer_size at open time.
    return device->hidden->mixbuf;
}
// Deinterleave the float32 samples in `buffer` into the planar per-channel
// arrays of the Web Audio buffer currently being filled (set by the
// onaudioprocess handler installed in OpenDevice). Runs the JS on the main
// thread. $0 = sample buffer pointer, $1 = frame count.
static bool EMSCRIPTENAUDIO_PlayDevice(SDL_AudioDevice *device, const Uint8 *buffer, int buffer_size)
{
    const int framelen = SDL_AUDIO_FRAMESIZE(device->spec);
    MAIN_THREAD_EM_ASM({
        /* Convert incoming buf pointer to a HEAPF32 offset. */
        #ifdef __wasm64__
        var buf = $0 / 4;
        #else
        var buf = $0 >>> 2;
        #endif
        var SDL3 = Module['SDL3'];
        var numChannels = SDL3.audio_playback.currentPlaybackBuffer['numberOfChannels'];
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL3.audio_playback.currentPlaybackBuffer['getChannelData'](c);
            if (channelData.length != $1) {
                throw 'Web Audio playback buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }
            // SDL gives us interleaved frames; Web Audio wants one array per channel.
            for (var j = 0; j < $1; ++j) {
                channelData[j] = HEAPF32[buf + (j * numChannels + c)];
            }
        }
    }, buffer, buffer_size / framelen);
    return true;
}
// Flush is a deliberate no-op here: recording data lives in the JS-side
// currentRecordingBuffer, which the next onaudioprocess callback overwrites anyway.
static void EMSCRIPTENAUDIO_FlushRecording(SDL_AudioDevice *device)
{
    // Do nothing, the new data will just be dropped.
}
// Copy the planar float32 channels of the current JS recording buffer into
// `buffer` as interleaved samples. $0 = destination pointer, $1 = frame count.
// Returns the number of bytes written (always the full buflen).
static int EMSCRIPTENAUDIO_RecordDevice(SDL_AudioDevice *device, void *buffer, int buflen)
{
    MAIN_THREAD_EM_ASM({
        var SDL3 = Module['SDL3'];
        var numChannels = SDL3.audio_recording.currentRecordingBuffer.numberOfChannels;
        for (var c = 0; c < numChannels; ++c) {
            var channelData = SDL3.audio_recording.currentRecordingBuffer.getChannelData(c);
            if (channelData.length != $1) {
                throw 'Web Audio recording buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
            }
            if (numChannels == 1) { // fastpath this a little for the common (mono) case.
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (j * 4), channelData[j], 'float');
                }
            } else {
                // interleave: frame j, channel c lands at ((j * numChannels) + c) floats in.
                for (var j = 0; j < $1; ++j) {
                    setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float');
                }
            }
        }
    }, buffer, (buflen / sizeof(float)) / device->spec.channels);
    return buflen;
}
// Tear down one side (playback or recording) of the Web Audio graph, then free
// the C-side private data. The shared AudioContext is closed only when neither
// side is using it anymore.
static void EMSCRIPTENAUDIO_CloseDevice(SDL_AudioDevice *device)
{
    if (!device->hidden) {
        return;  // never fully opened (or already closed); nothing to do.
    }
    MAIN_THREAD_EM_ASM({
        var SDL3 = Module['SDL3'];
        if ($0) {  // $0 != 0: recording device.
            if (SDL3.audio_recording.silenceTimer !== undefined) {
                clearInterval(SDL3.audio_recording.silenceTimer);
            }
            if (SDL3.audio_recording.stream !== undefined) {
                var tracks = SDL3.audio_recording.stream.getAudioTracks();
                for (var i = 0; i < tracks.length; i++) {
                    SDL3.audio_recording.stream.removeTrack(tracks[i]);
                }
            }
            if (SDL3.audio_recording.scriptProcessorNode !== undefined) {
                // swap in a no-op handler before disconnecting, in case a callback is in flight.
                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {};
                SDL3.audio_recording.scriptProcessorNode.disconnect();
            }
            if (SDL3.audio_recording.mediaStreamNode !== undefined) {
                SDL3.audio_recording.mediaStreamNode.disconnect();
            }
            SDL3.audio_recording = undefined;
        } else {  // playback device.
            if (SDL3.audio_playback.scriptProcessorNode != undefined) {
                SDL3.audio_playback.scriptProcessorNode.disconnect();
            }
            if (SDL3.audio_playback.silenceTimer !== undefined) {
                clearInterval(SDL3.audio_playback.silenceTimer);
            }
            SDL3.audio_playback = undefined;
        }
        // Last one out closes the AudioContext.
        if ((SDL3.audioContext !== undefined) && (SDL3.audio_playback === undefined) && (SDL3.audio_recording === undefined)) {
            SDL3.audioContext.close();
            SDL3.audioContext = undefined;
        }
    }, device->recording);
    SDL_free(device->hidden->mixbuf);
    SDL_free(device->hidden);
    device->hidden = NULL;
    SDL_AudioThreadFinalize(device);
}
// Force the JS library helpers used inside the EM_ASM blocks in this file
// ($autoResumeAudioContext, $dynCall) to be linked into the generated output.
EM_JS_DEPS(sdlaudio, "$autoResumeAudioContext,$dynCall");
// Open a playback or recording device. Creates (or reuses) the page-wide
// AudioContext, forces F32 at the context's native sample rate, allocates the
// playback mix buffer, and installs a ScriptProcessorNode whose onaudioprocess
// callback drives SDL's audio-thread iterator via dynCall. In every EM_ASM
// below: $0 = channel count, $1 = frames per buffer, $2 = iterate callback,
// $3 = the SDL_AudioDevice pointer passed to it.
static bool EMSCRIPTENAUDIO_OpenDevice(SDL_AudioDevice *device)
{
    // based on parts of library_sdl.js
    // create context
    const bool result = MAIN_THREAD_EM_ASM_INT({
        if (typeof(Module['SDL3']) === 'undefined') {
            Module['SDL3'] = {};
        }
        var SDL3 = Module['SDL3'];
        SDL3.audio_playback = {};
        SDL3.audio_recording = {};
        if (!SDL3.audioContext) {
            if (typeof(AudioContext) !== 'undefined') {
                SDL3.audioContext = new AudioContext();
            } else if (typeof(webkitAudioContext) !== 'undefined') {
                SDL3.audioContext = new webkitAudioContext();
            }
            if (SDL3.audioContext) {
                // No userActivation API? Use Emscripten's resume-on-first-input helper instead.
                if ((typeof navigator.userActivation) === 'undefined') {
                    autoResumeAudioContext(SDL3.audioContext);
                }
            }
        }
        return (SDL3.audioContext !== undefined);
    });
    if (!result) {
        return SDL_SetError("Web Audio API is not available!");
    }
    device->spec.format = SDL_AUDIO_F32; // web audio only supports floats
    // Initialize all variables that we clean on shutdown
    device->hidden = (struct SDL_PrivateAudioData *)SDL_calloc(1, sizeof(*device->hidden));
    if (!device->hidden) {
        return false;
    }
    // limit to native freq
    device->spec.freq = MAIN_THREAD_EM_ASM_INT({ return Module['SDL3'].audioContext.sampleRate; });
    device->sample_frames = SDL_GetDefaultSampleFramesFromFreq(device->spec.freq) * 2; // double the buffer size, some browsers need more, and we'll just have to live with the latency.
    SDL_UpdatedAudioDeviceFormat(device);
    if (!device->recording) {
        // Playback needs a C-side buffer for GetDeviceBuf/PlayDevice; start it silent.
        device->hidden->mixbuf = (Uint8 *)SDL_malloc(device->buffer_size);
        if (!device->hidden->mixbuf) {
            return false;
        }
        SDL_memset(device->hidden->mixbuf, device->silence_value, device->buffer_size);
    }
    if (device->recording) {
        /* The idea is to take the recording media stream, hook it up to an
           audio graph where we can pass it through a ScriptProcessorNode
           to access the raw PCM samples and push them to the SDL app's
           callback. From there, we "process" the audio data into silence
           and forget about it.
           This should, strictly speaking, use MediaRecorder for recording, but
           this API is cleaner to use and better supported, and fires a
           callback whenever there's enough data to fire down into the app.
           The downside is that we are spending CPU time silencing a buffer
           that the audiocontext uselessly mixes into any playback. On the
           upside, both of those things are not only run in native code in
           the browser, they're probably SIMD code, too. MediaRecorder
           feels like it's a pretty inefficient tapdance in similar ways,
           to be honest. */
        MAIN_THREAD_EM_ASM({
            var SDL3 = Module['SDL3'];
            var have_microphone = function(stream) {
                //console.log('SDL audio recording: we have a microphone! Replacing silence callback.');
                // Mic granted: stop feeding silence and tap the real stream instead.
                if (SDL3.audio_recording.silenceTimer !== undefined) {
                    clearInterval(SDL3.audio_recording.silenceTimer);
                    SDL3.audio_recording.silenceTimer = undefined;
                    SDL3.audio_recording.silenceBuffer = undefined
                }
                SDL3.audio_recording.mediaStreamNode = SDL3.audioContext.createMediaStreamSource(stream);
                SDL3.audio_recording.scriptProcessorNode = SDL3.audioContext.createScriptProcessor($1, $0, 1);
                SDL3.audio_recording.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
                    if ((SDL3 === undefined) || (SDL3.audio_recording === undefined)) { return; }
                    // Silence the output we're forced to produce, then hand the input to SDL.
                    audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
                    SDL3.audio_recording.currentRecordingBuffer = audioProcessingEvent.inputBuffer;
                    dynCall('ip', $2, [$3]);
                };
                SDL3.audio_recording.mediaStreamNode.connect(SDL3.audio_recording.scriptProcessorNode);
                SDL3.audio_recording.scriptProcessorNode.connect(SDL3.audioContext.destination);
                SDL3.audio_recording.stream = stream;
            };
            var no_microphone = function(error) {
                //console.log('SDL audio recording: we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
            };
            // we write silence to the audio callback until the microphone is available (user approves use, etc).
            SDL3.audio_recording.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
            SDL3.audio_recording.silenceBuffer.getChannelData(0).fill(0.0);
            var silence_callback = function() {
                SDL3.audio_recording.currentRecordingBuffer = SDL3.audio_recording.silenceBuffer;
                dynCall('ip', $2, [$3]);
            };
            // Fire at roughly one buffer's duration, in milliseconds.
            SDL3.audio_recording.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
            if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
                navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
            } else if (navigator.webkitGetUserMedia !== undefined) {
                navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
            }
        }, device->spec.channels, device->sample_frames, SDL_RecordingAudioThreadIterate, device);
    } else {
        // setup a ScriptProcessorNode
        MAIN_THREAD_EM_ASM({
            var SDL3 = Module['SDL3'];
            SDL3.audio_playback.scriptProcessorNode = SDL3.audioContext['createScriptProcessor']($1, 0, $0);
            SDL3.audio_playback.scriptProcessorNode['onaudioprocess'] = function (e) {
                if ((SDL3 === undefined) || (SDL3.audio_playback === undefined)) { return; }
                // if we're actually running the node, we don't need the fake callback anymore, so kill it.
                if (SDL3.audio_playback.silenceTimer !== undefined) {
                    clearInterval(SDL3.audio_playback.silenceTimer);
                    SDL3.audio_playback.silenceTimer = undefined;
                    SDL3.audio_playback.silenceBuffer = undefined;
                }
                // PlayDevice fills this buffer when SDL's iterator runs below.
                SDL3.audio_playback.currentPlaybackBuffer = e['outputBuffer'];
                dynCall('ip', $2, [$3]);
            };
            SDL3.audio_playback.scriptProcessorNode['connect'](SDL3.audioContext['destination']);
            if (SDL3.audioContext.state === 'suspended') { // uhoh, autoplay is blocked.
                SDL3.audio_playback.silenceBuffer = SDL3.audioContext.createBuffer($0, $1, SDL3.audioContext.sampleRate);
                SDL3.audio_playback.silenceBuffer.getChannelData(0).fill(0.0);
                var silence_callback = function() {
                    // Retry resuming once the user has interacted with the page.
                    if ((typeof navigator.userActivation) !== 'undefined') {
                        if (navigator.userActivation.hasBeenActive) {
                            SDL3.audioContext.resume();
                        }
                    }
                    // the buffer that gets filled here just gets ignored, so the app can make progress
                    // and/or avoid flooding audio queues until we can actually play audio.
                    SDL3.audio_playback.currentPlaybackBuffer = SDL3.audio_playback.silenceBuffer;
                    dynCall('ip', $2, [$3]);
                    SDL3.audio_playback.currentPlaybackBuffer = undefined;
                };
                SDL3.audio_playback.silenceTimer = setInterval(silence_callback, ($1 / SDL3.audioContext.sampleRate) * 1000);
            }
        }, device->spec.channels, device->sample_frames, SDL_PlaybackAudioThreadIterate, device);
    }
    return true;
}
  262. static bool EMSCRIPTENAUDIO_Init(SDL_AudioDriverImpl *impl)
  263. {
  264. bool available, recording_available;
  265. impl->OpenDevice = EMSCRIPTENAUDIO_OpenDevice;
  266. impl->CloseDevice = EMSCRIPTENAUDIO_CloseDevice;
  267. impl->GetDeviceBuf = EMSCRIPTENAUDIO_GetDeviceBuf;
  268. impl->PlayDevice = EMSCRIPTENAUDIO_PlayDevice;
  269. impl->FlushRecording = EMSCRIPTENAUDIO_FlushRecording;
  270. impl->RecordDevice = EMSCRIPTENAUDIO_RecordDevice;
  271. impl->OnlyHasDefaultPlaybackDevice = true;
  272. // technically, this is just runs in idle time in the main thread, but it's close enough to a "thread" for our purposes.
  273. impl->ProvidesOwnCallbackThread = true;
  274. // check availability
  275. available = MAIN_THREAD_EM_ASM_INT({
  276. if (typeof(AudioContext) !== 'undefined') {
  277. return true;
  278. } else if (typeof(webkitAudioContext) !== 'undefined') {
  279. return true;
  280. }
  281. return false;
  282. });
  283. if (!available) {
  284. SDL_SetError("No audio context available");
  285. }
  286. recording_available = available && MAIN_THREAD_EM_ASM_INT({
  287. if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
  288. return true;
  289. } else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
  290. return true;
  291. }
  292. return false;
  293. });
  294. impl->HasRecordingSupport = recording_available;
  295. impl->OnlyHasDefaultRecordingDevice = recording_available;
  296. return available;
  297. }
// Driver registration record consumed by SDL's audio core.
// Fields (positional; struct declared in ../SDL_sysaudio.h — confirm order there):
// short name, human-readable description, init function, then two boolean flags
// (presumably demand-only / preferred; both false here).
AudioBootStrap EMSCRIPTENAUDIO_bootstrap = {
    "emscripten", "SDL emscripten audio driver", EMSCRIPTENAUDIO_Init, false, false
};
  301. /* *INDENT-ON* */ // clang-format on
  302. #endif // SDL_AUDIO_DRIVER_EMSCRIPTEN