SDL_audiotypecvt.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978
  1. /*
  2. Simple DirectMedia Layer
  3. Copyright (C) 1997-2023 Sam Lantinga <slouken@libsdl.org>
  4. This software is provided 'as-is', without any express or implied
  5. warranty. In no event will the authors be held liable for any damages
  6. arising from the use of this software.
  7. Permission is granted to anyone to use this software for any purpose,
  8. including commercial applications, and to alter it and redistribute it
  9. freely, subject to the following restrictions:
  10. 1. The origin of this software must not be misrepresented; you must not
  11. claim that you wrote the original software. If you use this software
  12. in a product, an acknowledgment in the product documentation would be
  13. appreciated but is not required.
  14. 2. Altered source versions must be plainly marked as such, and must not be
  15. misrepresented as being the original software.
  16. 3. This notice may not be removed or altered from any source distribution.
  17. */
  18. #include "SDL_internal.h"
  19. #include "SDL_audio_c.h"
  20. #ifndef SDL_CPUINFO_DISABLED
  21. #if defined(__x86_64__) && defined(SDL_SSE2_INTRINSICS)
  22. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* x86_64 guarantees SSE2. */
  23. #elif defined(__MACOS__) && defined(SDL_SSE2_INTRINSICS)
  24. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* macOS/Intel guarantees SSE2. */
  25. #elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8) && defined(SDL_NEON_INTRINSICS)
  26. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* ARMv8+ promise NEON. */
  27. #elif defined(__APPLE__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 7) && defined(SDL_NEON_INTRINSICS)
  28. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* All Apple ARMv7 chips promise NEON support. */
  29. #endif
  30. #endif
  31. /* Set to zero if platform is guaranteed to use a SIMD codepath here. */
  32. #if !defined(NEED_SCALAR_CONVERTER_FALLBACKS) || defined(SDL_CPUINFO_DISABLED)
  33. #define NEED_SCALAR_CONVERTER_FALLBACKS 1
  34. #endif
  35. #define DIVBY128 0.0078125f
  36. #define DIVBY32768 0.000030517578125f
  37. #define DIVBY8388607 0.00000011920930376163766f
  38. #if NEED_SCALAR_CONVERTER_FALLBACKS
  39. /* these all convert backwards because (currently) float32 is >= to the size of anything it converts to, so it lets us safely convert in-place. */
  40. #define AUDIOCVT_TOFLOAT_SCALAR(from, fromtype, equation) \
  41. static void SDL_Convert_##from##_to_F32_Scalar(float *dst, const fromtype *src, int num_samples) { \
  42. int i; \
  43. LOG_DEBUG_AUDIO_CONVERT(#from, "F32"); \
  44. for (i = num_samples - 1; i >= 0; --i) { \
  45. dst[i] = equation; \
  46. } \
  47. }
  48. AUDIOCVT_TOFLOAT_SCALAR(S8, Sint8, ((float)src[i]) * DIVBY128)
  49. AUDIOCVT_TOFLOAT_SCALAR(U8, Uint8, (((float)src[i]) * DIVBY128) - 1.0f)
  50. AUDIOCVT_TOFLOAT_SCALAR(S16, Sint16, ((float)src[i]) * DIVBY32768)
  51. AUDIOCVT_TOFLOAT_SCALAR(S32, Sint32, ((float)(src[i] >> 8)) * DIVBY8388607)
  52. #undef AUDIOCVT_FROMFLOAT_SCALAR
/* these all convert forwards because (currently) float32 is >= to the size of anything it converts from, so it lets us safely convert in-place. */
/* Generates SDL_Convert_F32_to_<to>_Scalar: clamps each float sample to
   [-1, 1] and scales it into the target integer range. */
#define AUDIOCVT_FROMFLOAT_SCALAR(to, totype, clampmin, clampmax, equation) \
    static void SDL_Convert_F32_to_##to##_Scalar(totype *dst, const float *src, int num_samples) { \
        int i; \
        LOG_DEBUG_AUDIO_CONVERT("F32", #to); \
        for (i = 0; i < num_samples; i++) { \
            const float sample = src[i]; \
            if (sample >= 1.0f) { \
                dst[i] = (totype) (clampmax); \
            } else if (sample <= -1.0f) { \
                dst[i] = (totype) (clampmin); \
            } else { \
                dst[i] = (totype) (equation); \
            } \
        } \
    }

AUDIOCVT_FROMFLOAT_SCALAR(S8, Sint8, -128, 127, sample * 127.0f);
AUDIOCVT_FROMFLOAT_SCALAR(U8, Uint8, 0, 255, (sample + 1.0f) * 127.0f);
AUDIOCVT_FROMFLOAT_SCALAR(S16, Sint16, -32768, 32767, sample * 32767.0f);
/* S32 scales to 24-bit then shifts up by 8, mirroring the 24-bit precision of the S32->F32 path. */
AUDIOCVT_FROMFLOAT_SCALAR(S32, Sint32, -2147483648LL, 2147483647, ((Sint32)(sample * 8388607.0f)) << 8);
#undef AUDIOCVT_FROMFLOAT_SCALAR
  74. #endif /* NEED_SCALAR_CONVERTER_FALLBACKS */
  75. #ifdef SDL_SSE2_INTRINSICS
/* Sint8 -> normalized float32, SSE2. Converts back-to-front because the
   buffer grows (1 -> 4 bytes per sample), so in-place conversion is safe. */
static void SDL_TARGETING("sse2") SDL_Convert_S8_to_F32_SSE2(float *dst, const Sint8 *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("S8", "F32 (using SSE2)");
    src += num_samples - 1;
    dst += num_samples - 1;
    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    /* (dst - 15) is where a 16-float store would begin; peel scalars until that address is 16-byte aligned. */
    for (i = num_samples; i && (((size_t)(dst - 15)) & 15); --i, --src, --dst) {
        *dst = ((float)*src) * DIVBY128;
    }
    src -= 15;
    dst -= 15; /* adjust to read SSE blocks from the start. */
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128i *mmsrc = (const __m128i *)src;
        const __m128i zero = _mm_setzero_si128();
        const __m128 divby128 = _mm_set1_ps(DIVBY128);
        while (i >= 16) { /* 16 * 8-bit */
            const __m128i bytes = _mm_load_si128(mmsrc); /* get 16 sint8 into an XMM register. */
            /* treat as int16, shift left to clear every other sint16, then back right with sign-extend. Now sint16. */
            const __m128i shorts1 = _mm_srai_epi16(_mm_slli_epi16(bytes, 8), 8);
            /* right-shift-sign-extend gets us sint16 with the other set of values. */
            const __m128i shorts2 = _mm_srai_epi16(bytes, 8);
            /* unpack against zero to make these int32, shift to make them sign-extend, convert to float, multiply. Whew! */
            const __m128 floats1 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpacklo_epi16(shorts1, zero), 16), 16)), divby128);
            const __m128 floats2 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpacklo_epi16(shorts2, zero), 16), 16)), divby128);
            const __m128 floats3 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpackhi_epi16(shorts1, zero), 16), 16)), divby128);
            const __m128 floats4 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpackhi_epi16(shorts2, zero), 16), 16)), divby128);
            /* Interleave back into correct order, store. */
            _mm_store_ps(dst, _mm_unpacklo_ps(floats1, floats2));
            _mm_store_ps(dst + 4, _mm_unpackhi_ps(floats1, floats2));
            _mm_store_ps(dst + 8, _mm_unpacklo_ps(floats3, floats4));
            _mm_store_ps(dst + 12, _mm_unpackhi_ps(floats3, floats4));
            i -= 16;
            mmsrc--;  /* moving backwards one full 16-byte block at a time. */
            dst -= 16;
        }
        src = (const Sint8 *)mmsrc;
    }
    src += 15;
    dst += 15; /* adjust for any scalar finishing. */
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float)*src) * DIVBY128;
        i--;
        src--;
        dst--;
    }
}
/* Uint8 -> normalized float32, SSE2. Back-to-front for safe in-place growth;
   the [0,255] range is scaled by 1/128 then shifted down by 1.0 into [-1, 1). */
static void SDL_TARGETING("sse2") SDL_Convert_U8_to_F32_SSE2(float *dst, const Uint8 *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("U8", "F32 (using SSE2)");
    src += num_samples - 1;
    dst += num_samples - 1;
    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = num_samples; i && (((size_t)(dst - 15)) & 15); --i, --src, --dst) {
        *dst = (((float)*src) * DIVBY128) - 1.0f;
    }
    src -= 15;
    dst -= 15; /* adjust to read SSE blocks from the start. */
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128i *mmsrc = (const __m128i *)src;
        const __m128i zero = _mm_setzero_si128();
        const __m128 divby128 = _mm_set1_ps(DIVBY128);
        const __m128 minus1 = _mm_set1_ps(-1.0f);
        while (i >= 16) { /* 16 * 8-bit */
            const __m128i bytes = _mm_load_si128(mmsrc); /* get 16 uint8 into an XMM register. */
            /* treat as int16, shift left to clear every other sint16, then back right with zero-extend. Now uint16. */
            const __m128i shorts1 = _mm_srli_epi16(_mm_slli_epi16(bytes, 8), 8);
            /* right-shift-zero-extend gets us uint16 with the other set of values. */
            const __m128i shorts2 = _mm_srli_epi16(bytes, 8);
            /* unpack against zero to make these int32, convert to float, multiply, add. Whew! */
            /* Note that AVX2 can do floating point multiply+add in one instruction, fwiw. SSE2 cannot. */
            const __m128 floats1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(shorts1, zero)), divby128), minus1);
            const __m128 floats2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(shorts2, zero)), divby128), minus1);
            const __m128 floats3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(shorts1, zero)), divby128), minus1);
            const __m128 floats4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(shorts2, zero)), divby128), minus1);
            /* Interleave back into correct order, store. */
            _mm_store_ps(dst, _mm_unpacklo_ps(floats1, floats2));
            _mm_store_ps(dst + 4, _mm_unpackhi_ps(floats1, floats2));
            _mm_store_ps(dst + 8, _mm_unpacklo_ps(floats3, floats4));
            _mm_store_ps(dst + 12, _mm_unpackhi_ps(floats3, floats4));
            i -= 16;
            mmsrc--;
            dst -= 16;
        }
        src = (const Uint8 *)mmsrc;
    }
    src += 15;
    dst += 15; /* adjust for any scalar finishing. */
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = (((float)*src) * DIVBY128) - 1.0f;
        i--;
        src--;
        dst--;
    }
}
/* Sint16 -> normalized float32, SSE2. Back-to-front (2 -> 4 bytes per sample)
   for safe in-place conversion; 8 samples per SIMD iteration. */
static void SDL_TARGETING("sse2") SDL_Convert_S16_to_F32_SSE2(float *dst, const Sint16 *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("S16", "F32 (using SSE2)");
    src += num_samples - 1;
    dst += num_samples - 1;
    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    /* (dst - 7) is where an 8-float block store would begin; peel scalars until it is aligned. */
    for (i = num_samples; i && (((size_t)(dst - 7)) & 15); --i, --src, --dst) {
        *dst = ((float)*src) * DIVBY32768;
    }
    src -= 7;
    dst -= 7; /* adjust to read SSE blocks from the start. */
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 divby32768 = _mm_set1_ps(DIVBY32768);
        while (i >= 8) { /* 8 * 16-bit */
            const __m128i ints = _mm_load_si128((__m128i const *)src); /* get 8 sint16 into an XMM register. */
            /* treat as int32, shift left to clear every other sint16, then back right with sign-extend. Now sint32. */
            const __m128i a = _mm_srai_epi32(_mm_slli_epi32(ints, 16), 16);
            /* right-shift-sign-extend gets us sint32 with the other set of values. */
            const __m128i b = _mm_srai_epi32(ints, 16);
            /* Interleave these back into the right order, convert to float, multiply, store. */
            _mm_store_ps(dst, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi32(a, b)), divby32768));
            _mm_store_ps(dst + 4, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi32(a, b)), divby32768));
            i -= 8;
            src -= 8;
            dst -= 8;
        }
    }
    src += 7;
    dst += 7; /* adjust for any scalar finishing. */
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float)*src) * DIVBY32768;
        i--;
        src--;
        dst--;
    }
}
  221. static void SDL_TARGETING("sse2") SDL_Convert_S32_to_F32_SSE2(float *dst, const Sint32 *src, int num_samples)
  222. {
  223. int i;
  224. LOG_DEBUG_AUDIO_CONVERT("S32", "F32 (using SSE2)");
  225. /* Get dst aligned to 16 bytes */
  226. for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
  227. *dst = ((float)(*src >> 8)) * DIVBY8388607;
  228. }
  229. SDL_assert(!i || !(((size_t)dst) & 15));
  230. /* Make sure src is aligned too. */
  231. if (!(((size_t)src) & 15)) {
  232. /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
  233. const __m128 divby8388607 = _mm_set1_ps(DIVBY8388607);
  234. const __m128i *mmsrc = (const __m128i *)src;
  235. while (i >= 4) { /* 4 * sint32 */
  236. /* shift out lowest bits so int fits in a float32. Small precision loss, but much faster. */
  237. _mm_store_ps(dst, _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_load_si128(mmsrc), 8)), divby8388607));
  238. i -= 4;
  239. mmsrc++;
  240. dst += 4;
  241. }
  242. src = (const Sint32 *)mmsrc;
  243. }
  244. /* Finish off any leftovers with scalar operations. */
  245. while (i) {
  246. *dst = ((float)(*src >> 8)) * DIVBY8388607;
  247. i--;
  248. src++;
  249. dst++;
  250. }
  251. }
/* float32 -> Sint8, SSE2. Clamps to [-1, 1], scales by 127, packs 16 samples
   per SIMD iteration. Forward walk (buffer shrinks), safe in-place. */
static void SDL_TARGETING("sse2") SDL_Convert_F32_to_S8_SSE2(Sint8 *dst, const float *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("F32", "S8 (using SSE2)");
    /* Get dst aligned to 16 bytes */
    for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
    }
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby127 = _mm_set1_ps(127.0f);
        __m128i *mmdst = (__m128i *)dst;
        while (i >= 16) { /* 16 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 4)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 8)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 12)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            /* saturating packs: sint32 -> sint16 -> sint8. */
            _mm_store_si128(mmdst, _mm_packs_epi16(_mm_packs_epi32(ints1, ints2), _mm_packs_epi32(ints3, ints4))); /* pack down, store out. */
            i -= 16;
            src += 16;
            mmdst++;
        }
        dst = (Sint8 *)mmdst;
    }
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
        i--;
        src++;
        dst++;
    }
}
/* float32 -> Uint8, SSE2. Clamps to [-1, 1], biases by +1.0, scales by 127,
   packs 16 samples per SIMD iteration. Forward walk, safe in-place. */
static void SDL_TARGETING("sse2") SDL_Convert_F32_to_U8_SSE2(Uint8 *dst, const float *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("F32", "U8 (using SSE2)");
    /* Get dst aligned to 16 bytes */
    for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 255;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint8)((sample + 1.0f) * 127.0f);
        }
    }
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby127 = _mm_set1_ps(127.0f);
        __m128i *mmdst = (__m128i *)dst;
        while (i >= 16) { /* 16 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 4)), one), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 8)), one), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 12)), one), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            /* packus on the final step gives unsigned 8-bit saturation. */
            _mm_store_si128(mmdst, _mm_packus_epi16(_mm_packs_epi32(ints1, ints2), _mm_packs_epi32(ints3, ints4))); /* pack down, store out. */
            i -= 16;
            src += 16;
            mmdst++;
        }
        dst = (Uint8 *)mmdst;
    }
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 255;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint8)((sample + 1.0f) * 127.0f);
        }
        i--;
        src++;
        dst++;
    }
}
/* float32 -> Sint16, SSE2. Clamps to [-1, 1], scales by 32767, packs 8
   samples per SIMD iteration. Forward walk, safe in-place. */
static void SDL_TARGETING("sse2") SDL_Convert_F32_to_S16_SSE2(Sint16 *dst, const float *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("F32", "S16 (using SSE2)");
    /* Get dst aligned to 16 bytes */
    for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 32767;
        } else if (sample <= -1.0f) {
            *dst = -32768;
        } else {
            *dst = (Sint16)(sample * 32767.0f);
        }
    }
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby32767 = _mm_set1_ps(32767.0f);
        __m128i *mmdst = (__m128i *)dst;
        while (i >= 8) { /* 8 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src + 4)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
            _mm_store_si128(mmdst, _mm_packs_epi32(ints1, ints2)); /* pack to sint16, store out. */
            i -= 8;
            src += 8;
            mmdst++;
        }
        dst = (Sint16 *)mmdst;
    }
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 32767;
        } else if (sample <= -1.0f) {
            *dst = -32768;
        } else {
            *dst = (Sint16)(sample * 32767.0f);
        }
        i--;
        src++;
        dst++;
    }
}
/* float32 -> Sint32, SSE2. Clamps to [-1, 1], scales to 24-bit, shifts up by
   8 (matching the scalar path's precision). Same sample size both ways, so a
   16-byte-aligned dst implies an aligned src here — hence the assert, not an if. */
static void SDL_TARGETING("sse2") SDL_Convert_F32_to_S32_SSE2(Sint32 *dst, const float *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("F32", "S32 (using SSE2)");
    /* Get dst aligned to 16 bytes */
    for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 2147483647;
        } else if (sample <= -1.0f) {
            *dst = (Sint32)-2147483648LL;
        } else {
            *dst = ((Sint32)(sample * 8388607.0f)) << 8;
        }
    }
    SDL_assert(!i || !(((size_t)dst) & 15));
    SDL_assert(!i || !(((size_t)src) & 15));
    {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby8388607 = _mm_set1_ps(8388607.0f);
        __m128i *mmdst = (__m128i *)dst;
        while (i >= 4) { /* 4 * float32 */
            _mm_store_si128(mmdst, _mm_slli_epi32(_mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby8388607)), 8)); /* load 4 floats, clamp, convert to sint32 */
            i -= 4;
            src += 4;
            mmdst++;
        }
        dst = (Sint32 *)mmdst;
    }
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 2147483647;
        } else if (sample <= -1.0f) {
            *dst = (Sint32)-2147483648LL;
        } else {
            *dst = ((Sint32)(sample * 8388607.0f)) << 8;
        }
        i--;
        src++;
        dst++;
    }
}
  446. #endif
  447. #ifdef SDL_NEON_INTRINSICS
/* Sint8 -> normalized float32, NEON. Same back-to-front in-place strategy as
   the SSE2 version; widens sint8 -> sint16 -> sint32 -> float32. */
static void SDL_Convert_S8_to_F32_NEON(float *dst, const Sint8 *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("S8", "F32 (using NEON)");
    src += num_samples - 1;
    dst += num_samples - 1;
    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = num_samples; i && (((size_t)(dst - 15)) & 15); --i, --src, --dst) {
        *dst = ((float)*src) * DIVBY128;
    }
    src -= 15;
    dst -= 15; /* adjust to read NEON blocks from the start. */
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const int8_t *mmsrc = (const int8_t *)src;
        const float32x4_t divby128 = vdupq_n_f32(DIVBY128);
        while (i >= 16) { /* 16 * 8-bit */
            const int8x16_t bytes = vld1q_s8(mmsrc); /* get 16 sint8 into a NEON register. */
            const int16x8_t int16hi = vmovl_s8(vget_high_s8(bytes)); /* convert top 8 bytes to 8 int16 */
            const int16x8_t int16lo = vmovl_s8(vget_low_s8(bytes)); /* convert bottom 8 bytes to 8 int16 */
            /* split int16 to two int32, then convert to float, then multiply to normalize, store. */
            vst1q_f32(dst, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(int16lo))), divby128));
            vst1q_f32(dst + 4, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(int16lo))), divby128));
            vst1q_f32(dst + 8, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(int16hi))), divby128));
            vst1q_f32(dst + 12, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(int16hi))), divby128));
            i -= 16;
            mmsrc -= 16;  /* element pointer here, so step by 16 bytes (vs the SSE2 __m128i* decrement). */
            dst -= 16;
        }
        src = (const Sint8 *)mmsrc;
    }
    src += 15;
    dst += 15; /* adjust for any scalar finishing. */
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float)*src) * DIVBY128;
        i--;
        src--;
        dst--;
    }
}
/* Uint8 -> normalized float32, NEON. Back-to-front in-place; uses vmlaq
   (multiply-accumulate onto -1.0) to fuse the scale and bias steps. */
static void SDL_Convert_U8_to_F32_NEON(float *dst, const Uint8 *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("U8", "F32 (using NEON)");
    src += num_samples - 1;
    dst += num_samples - 1;
    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = num_samples; i && (((size_t)(dst - 15)) & 15); --i, --src, --dst) {
        *dst = (((float)*src) * DIVBY128) - 1.0f;
    }
    src -= 15;
    dst -= 15; /* adjust to read NEON blocks from the start. */
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const uint8_t *mmsrc = (const uint8_t *)src;
        const float32x4_t divby128 = vdupq_n_f32(DIVBY128);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        while (i >= 16) { /* 16 * 8-bit */
            const uint8x16_t bytes = vld1q_u8(mmsrc); /* get 16 uint8 into a NEON register. */
            const uint16x8_t uint16hi = vmovl_u8(vget_high_u8(bytes)); /* convert top 8 bytes to 8 uint16 */
            const uint16x8_t uint16lo = vmovl_u8(vget_low_u8(bytes)); /* convert bottom 8 bytes to 8 uint16 */
            /* split uint16 to two uint32, then convert to float, then multiply to normalize, subtract to adjust for sign, store. */
            vst1q_f32(dst, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_low_u16(uint16lo))), divby128));
            vst1q_f32(dst + 4, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_high_u16(uint16lo))), divby128));
            vst1q_f32(dst + 8, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_low_u16(uint16hi))), divby128));
            vst1q_f32(dst + 12, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_high_u16(uint16hi))), divby128));
            i -= 16;
            mmsrc -= 16;
            dst -= 16;
        }
        src = (const Uint8 *)mmsrc;
    }
    src += 15;
    dst += 15; /* adjust for any scalar finishing. */
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = (((float)*src) * DIVBY128) - 1.0f;
        i--;
        src--;
        dst--;
    }
}
/* Sint16 -> normalized float32, NEON. Back-to-front in-place; 8 samples per
   SIMD iteration, widened sint16 -> sint32 -> float32. */
static void SDL_Convert_S16_to_F32_NEON(float *dst, const Sint16 *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("S16", "F32 (using NEON)");
    src += num_samples - 1;
    dst += num_samples - 1;
    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = num_samples; i && (((size_t)(dst - 7)) & 15); --i, --src, --dst) {
        *dst = ((float)*src) * DIVBY32768;
    }
    src -= 7;
    dst -= 7; /* adjust to read NEON blocks from the start. */
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t divby32768 = vdupq_n_f32(DIVBY32768);
        while (i >= 8) { /* 8 * 16-bit */
            const int16x8_t ints = vld1q_s16((int16_t const *)src); /* get 8 sint16 into a NEON register. */
            /* split int16 to two int32, then convert to float, then multiply to normalize, store. */
            vst1q_f32(dst, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(ints))), divby32768));
            vst1q_f32(dst + 4, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(ints))), divby32768));
            i -= 8;
            src -= 8;
            dst -= 8;
        }
    }
    src += 7;
    dst += 7; /* adjust for any scalar finishing. */
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float)*src) * DIVBY32768;
        i--;
        src--;
        dst--;
    }
}
  572. static void SDL_Convert_S32_to_F32_NEON(float *dst, const Sint32 *src, int num_samples)
  573. {
  574. int i;
  575. LOG_DEBUG_AUDIO_CONVERT("S32", "F32 (using NEON)");
  576. /* Get dst aligned to 16 bytes */
  577. for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
  578. *dst = ((float)(*src >> 8)) * DIVBY8388607;
  579. }
  580. SDL_assert(!i || !(((size_t)dst) & 15));
  581. /* Make sure src is aligned too. */
  582. if (!(((size_t)src) & 15)) {
  583. /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
  584. const float32x4_t divby8388607 = vdupq_n_f32(DIVBY8388607);
  585. const int32_t *mmsrc = (const int32_t *)src;
  586. while (i >= 4) { /* 4 * sint32 */
  587. /* shift out lowest bits so int fits in a float32. Small precision loss, but much faster. */
  588. vst1q_f32(dst, vmulq_f32(vcvtq_f32_s32(vshrq_n_s32(vld1q_s32(mmsrc), 8)), divby8388607));
  589. i -= 4;
  590. mmsrc += 4;
  591. dst += 4;
  592. }
  593. src = (const Sint32 *)mmsrc;
  594. }
  595. /* Finish off any leftovers with scalar operations. */
  596. while (i) {
  597. *dst = ((float)(*src >> 8)) * DIVBY8388607;
  598. i--;
  599. src++;
  600. dst++;
  601. }
  602. }
/* float32 -> Sint8, NEON. Clamps to [-1, 1], scales by 127, then narrows
   sint32 -> sint16 -> sint8, 16 samples per SIMD iteration. Forward walk. */
static void SDL_Convert_F32_to_S8_NEON(Sint8 *dst, const float *src, int num_samples)
{
    int i;
    LOG_DEBUG_AUDIO_CONVERT("F32", "S8 (using NEON)");
    /* Get dst aligned to 16 bytes */
    for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
    }
    SDL_assert(!i || !(((size_t)dst) & 15));
    /* Make sure src is aligned too. */
    if (!(((size_t)src) & 15)) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t one = vdupq_n_f32(1.0f);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        const float32x4_t mulby127 = vdupq_n_f32(127.0f);
        int8_t *mmdst = (int8_t *)dst;
        while (i >= 16) { /* 16 * float32 */
            const int32x4_t ints1 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints2 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 4)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints3 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 8)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints4 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 12)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int8x8_t i8lo = vmovn_s16(vcombine_s16(vmovn_s32(ints1), vmovn_s32(ints2))); /* narrow to sint16, combine, narrow to sint8 */
            const int8x8_t i8hi = vmovn_s16(vcombine_s16(vmovn_s32(ints3), vmovn_s32(ints4))); /* narrow to sint16, combine, narrow to sint8 */
            vst1q_s8(mmdst, vcombine_s8(i8lo, i8hi)); /* combine to int8x16_t, store out */
            i -= 16;
            src += 16;
            mmdst += 16;
        }
        dst = (Sint8 *)mmdst;
    }
    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
        i--;
        src++;
        dst++;
    }
}
  655. static void SDL_Convert_F32_to_U8_NEON(Uint8 *dst, const float *src, int num_samples)
  656. {
  657. int i;
  658. LOG_DEBUG_AUDIO_CONVERT("F32", "U8 (using NEON)");
  659. /* Get dst aligned to 16 bytes */
  660. for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
  661. const float sample = *src;
  662. if (sample >= 1.0f) {
  663. *dst = 255;
  664. } else if (sample <= -1.0f) {
  665. *dst = 0;
  666. } else {
  667. *dst = (Uint8)((sample + 1.0f) * 127.0f);
  668. }
  669. }
  670. SDL_assert(!i || !(((size_t)dst) & 15));
  671. /* Make sure src is aligned too. */
  672. if (!(((size_t)src) & 15)) {
  673. /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
  674. const float32x4_t one = vdupq_n_f32(1.0f);
  675. const float32x4_t negone = vdupq_n_f32(-1.0f);
  676. const float32x4_t mulby127 = vdupq_n_f32(127.0f);
  677. uint8_t *mmdst = (uint8_t *)dst;
  678. while (i >= 16) { /* 16 * float32 */
  679. const uint32x4_t uints1 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
  680. const uint32x4_t uints2 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 4)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
  681. const uint32x4_t uints3 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 8)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
  682. const uint32x4_t uints4 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 12)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
  683. const uint8x8_t ui8lo = vmovn_u16(vcombine_u16(vmovn_u32(uints1), vmovn_u32(uints2))); /* narrow to uint16, combine, narrow to uint8 */
  684. const uint8x8_t ui8hi = vmovn_u16(vcombine_u16(vmovn_u32(uints3), vmovn_u32(uints4))); /* narrow to uint16, combine, narrow to uint8 */
  685. vst1q_u8(mmdst, vcombine_u8(ui8lo, ui8hi)); /* combine to uint8x16_t, store out */
  686. i -= 16;
  687. src += 16;
  688. mmdst += 16;
  689. }
  690. dst = (Uint8 *)mmdst;
  691. }
  692. /* Finish off any leftovers with scalar operations. */
  693. while (i) {
  694. const float sample = *src;
  695. if (sample >= 1.0f) {
  696. *dst = 255;
  697. } else if (sample <= -1.0f) {
  698. *dst = 0;
  699. } else {
  700. *dst = (Uint8)((sample + 1.0f) * 127.0f);
  701. }
  702. i--;
  703. src++;
  704. dst++;
  705. }
  706. }
  707. static void SDL_Convert_F32_to_S16_NEON(Sint16 *dst, const float *src, int num_samples)
  708. {
  709. int i;
  710. LOG_DEBUG_AUDIO_CONVERT("F32", "S16 (using NEON)");
  711. /* Get dst aligned to 16 bytes */
  712. for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
  713. const float sample = *src;
  714. if (sample >= 1.0f) {
  715. *dst = 32767;
  716. } else if (sample <= -1.0f) {
  717. *dst = -32768;
  718. } else {
  719. *dst = (Sint16)(sample * 32767.0f);
  720. }
  721. }
  722. SDL_assert(!i || !(((size_t)dst) & 15));
  723. /* Make sure src is aligned too. */
  724. if (!(((size_t)src) & 15)) {
  725. /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
  726. const float32x4_t one = vdupq_n_f32(1.0f);
  727. const float32x4_t negone = vdupq_n_f32(-1.0f);
  728. const float32x4_t mulby32767 = vdupq_n_f32(32767.0f);
  729. int16_t *mmdst = (int16_t *)dst;
  730. while (i >= 8) { /* 8 * float32 */
  731. const int32x4_t ints1 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
  732. const int32x4_t ints2 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src + 4)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
  733. vst1q_s16(mmdst, vcombine_s16(vmovn_s32(ints1), vmovn_s32(ints2))); /* narrow to sint16, combine, store out. */
  734. i -= 8;
  735. src += 8;
  736. mmdst += 8;
  737. }
  738. dst = (Sint16 *)mmdst;
  739. }
  740. /* Finish off any leftovers with scalar operations. */
  741. while (i) {
  742. const float sample = *src;
  743. if (sample >= 1.0f) {
  744. *dst = 32767;
  745. } else if (sample <= -1.0f) {
  746. *dst = -32768;
  747. } else {
  748. *dst = (Sint16)(sample * 32767.0f);
  749. }
  750. i--;
  751. src++;
  752. dst++;
  753. }
  754. }
  755. static void SDL_Convert_F32_to_S32_NEON(Sint32 *dst, const float *src, int num_samples)
  756. {
  757. int i;
  758. LOG_DEBUG_AUDIO_CONVERT("F32", "S32 (using NEON)");
  759. /* Get dst aligned to 16 bytes */
  760. for (i = num_samples; i && (((size_t)dst) & 15); --i, ++src, ++dst) {
  761. const float sample = *src;
  762. if (sample >= 1.0f) {
  763. *dst = 2147483647;
  764. } else if (sample <= -1.0f) {
  765. *dst = (-2147483647) - 1;
  766. } else {
  767. *dst = ((Sint32)(sample * 8388607.0f)) << 8;
  768. }
  769. }
  770. SDL_assert(!i || !(((size_t)dst) & 15));
  771. SDL_assert(!i || !(((size_t)src) & 15));
  772. {
  773. /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
  774. const float32x4_t one = vdupq_n_f32(1.0f);
  775. const float32x4_t negone = vdupq_n_f32(-1.0f);
  776. const float32x4_t mulby8388607 = vdupq_n_f32(8388607.0f);
  777. int32_t *mmdst = (int32_t *)dst;
  778. while (i >= 4) { /* 4 * float32 */
  779. vst1q_s32(mmdst, vshlq_n_s32(vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), mulby8388607)), 8));
  780. i -= 4;
  781. src += 4;
  782. mmdst += 4;
  783. }
  784. dst = (Sint32 *)mmdst;
  785. }
  786. /* Finish off any leftovers with scalar operations. */
  787. while (i) {
  788. const float sample = *src;
  789. if (sample >= 1.0f) {
  790. *dst = 2147483647;
  791. } else if (sample <= -1.0f) {
  792. *dst = (-2147483647) - 1;
  793. } else {
  794. *dst = ((Sint32)(sample * 8388607.0f)) << 8;
  795. }
  796. i--;
  797. src++;
  798. dst++;
  799. }
  800. }
  801. #endif
  802. /* Function pointers set to a CPU-specific implementation. */
  803. void (*SDL_Convert_S8_to_F32)(float *dst, const Sint8 *src, int num_samples) = NULL;
  804. void (*SDL_Convert_U8_to_F32)(float *dst, const Uint8 *src, int num_samples) = NULL;
  805. void (*SDL_Convert_S16_to_F32)(float *dst, const Sint16 *src, int num_samples) = NULL;
  806. void (*SDL_Convert_S32_to_F32)(float *dst, const Sint32 *src, int num_samples) = NULL;
  807. void (*SDL_Convert_F32_to_S8)(Sint8 *dst, const float *src, int num_samples) = NULL;
  808. void (*SDL_Convert_F32_to_U8)(Uint8 *dst, const float *src, int num_samples) = NULL;
  809. void (*SDL_Convert_F32_to_S16)(Sint16 *dst, const float *src, int num_samples) = NULL;
  810. void (*SDL_Convert_F32_to_S32)(Sint32 *dst, const float *src, int num_samples) = NULL;
  811. void SDL_ChooseAudioConverters(void)
  812. {
  813. static SDL_bool converters_chosen = SDL_FALSE;
  814. if (converters_chosen) {
  815. return;
  816. }
  817. #define SET_CONVERTER_FUNCS(fntype) \
  818. SDL_Convert_S8_to_F32 = SDL_Convert_S8_to_F32_##fntype; \
  819. SDL_Convert_U8_to_F32 = SDL_Convert_U8_to_F32_##fntype; \
  820. SDL_Convert_S16_to_F32 = SDL_Convert_S16_to_F32_##fntype; \
  821. SDL_Convert_S32_to_F32 = SDL_Convert_S32_to_F32_##fntype; \
  822. SDL_Convert_F32_to_S8 = SDL_Convert_F32_to_S8_##fntype; \
  823. SDL_Convert_F32_to_U8 = SDL_Convert_F32_to_U8_##fntype; \
  824. SDL_Convert_F32_to_S16 = SDL_Convert_F32_to_S16_##fntype; \
  825. SDL_Convert_F32_to_S32 = SDL_Convert_F32_to_S32_##fntype; \
  826. converters_chosen = SDL_TRUE
  827. #ifdef SDL_SSE2_INTRINSICS
  828. if (SDL_HasSSE2()) {
  829. SET_CONVERTER_FUNCS(SSE2);
  830. return;
  831. }
  832. #endif
  833. #ifdef SDL_NEON_INTRINSICS
  834. if (SDL_HasNEON()) {
  835. SET_CONVERTER_FUNCS(NEON);
  836. return;
  837. }
  838. #endif
  839. #if NEED_SCALAR_CONVERTER_FALLBACKS
  840. SET_CONVERTER_FUNCS(Scalar);
  841. #endif
  842. #undef SET_CONVERTER_FUNCS
  843. SDL_assert(converters_chosen == SDL_TRUE);
  844. }