/* SDL_audiotypecvt.c -- audio sample type conversion routines. */
  1. /*
  2. Simple DirectMedia Layer
  3. Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org>
  4. This software is provided 'as-is', without any express or implied
  5. warranty. In no event will the authors be held liable for any damages
  6. arising from the use of this software.
  7. Permission is granted to anyone to use this software for any purpose,
  8. including commercial applications, and to alter it and redistribute it
  9. freely, subject to the following restrictions:
  10. 1. The origin of this software must not be misrepresented; you must not
  11. claim that you wrote the original software. If you use this software
  12. in a product, an acknowledgment in the product documentation would be
  13. appreciated but is not required.
  14. 2. Altered source versions must be plainly marked as such, and must not be
  15. misrepresented as being the original software.
  16. 3. This notice may not be removed or altered from any source distribution.
  17. */
  18. #include "../SDL_internal.h"
  19. #include "SDL_audio.h"
  20. #include "SDL_audio_c.h"
  21. #include "SDL_cpuinfo.h"
  22. #ifdef __ARM_NEON
  23. #define HAVE_NEON_INTRINSICS 1
  24. #endif
  25. #ifdef __SSE2__
  26. #define HAVE_SSE2_INTRINSICS 1
  27. #endif
  28. #if defined(__x86_64__) && HAVE_SSE2_INTRINSICS
  29. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* x86_64 guarantees SSE2. */
  30. #elif __MACOSX__ && HAVE_SSE2_INTRINSICS
  31. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* Mac OS X/Intel guarantees SSE2. */
  32. #elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8) && HAVE_NEON_INTRINSICS
  33. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* ARMv8+ promise NEON. */
  34. #elif defined(__APPLE__) && defined(__ARM_ARCH) && (__ARM_ARCH >= 7) && HAVE_NEON_INTRINSICS
  35. #define NEED_SCALAR_CONVERTER_FALLBACKS 0 /* All Apple ARMv7 chips promise NEON support. */
  36. #endif
  37. /* Set to zero if platform is guaranteed to use a SIMD codepath here. */
  38. #ifndef NEED_SCALAR_CONVERTER_FALLBACKS
  39. #define NEED_SCALAR_CONVERTER_FALLBACKS 1
  40. #endif
/* Function pointers set to a CPU-specific implementation.
   Each one converts an SDL_AudioCVT buffer in place between the named
   integer format and float32; they are assigned elsewhere (scalar
   fallback, SSE2, or NEON variant, depending on runtime CPU features). */
SDL_AudioFilter SDL_Convert_S8_to_F32 = NULL;
SDL_AudioFilter SDL_Convert_U8_to_F32 = NULL;
SDL_AudioFilter SDL_Convert_S16_to_F32 = NULL;
SDL_AudioFilter SDL_Convert_U16_to_F32 = NULL;
SDL_AudioFilter SDL_Convert_S32_to_F32 = NULL;
SDL_AudioFilter SDL_Convert_F32_to_S8 = NULL;
SDL_AudioFilter SDL_Convert_F32_to_U8 = NULL;
SDL_AudioFilter SDL_Convert_F32_to_S16 = NULL;
SDL_AudioFilter SDL_Convert_F32_to_U16 = NULL;
SDL_AudioFilter SDL_Convert_F32_to_S32 = NULL;

/* Reciprocal scale factors (multiply is cheaper than divide). */
#define DIVBY128 0.0078125f                       /* 1.0f / 128.0f */
#define DIVBY32768 0.000030517578125f             /* 1.0f / 32768.0f */
#define DIVBY8388607 0.00000011920930376163766f   /* 1.0f / 8388607.0f */
  55. #if NEED_SCALAR_CONVERTER_FALLBACKS
  56. static void SDLCALL
  57. SDL_Convert_S8_to_F32_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  58. {
  59. const Sint8 *src = ((const Sint8 *) (cvt->buf + cvt->len_cvt)) - 1;
  60. float *dst = ((float *) (cvt->buf + cvt->len_cvt * 4)) - 1;
  61. int i;
  62. LOG_DEBUG_CONVERT("AUDIO_S8", "AUDIO_F32");
  63. for (i = cvt->len_cvt; i; --i, --src, --dst) {
  64. *dst = ((float) *src) * DIVBY128;
  65. }
  66. cvt->len_cvt *= 4;
  67. if (cvt->filters[++cvt->filter_index]) {
  68. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  69. }
  70. }
  71. static void SDLCALL
  72. SDL_Convert_U8_to_F32_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  73. {
  74. const Uint8 *src = ((const Uint8 *) (cvt->buf + cvt->len_cvt)) - 1;
  75. float *dst = ((float *) (cvt->buf + cvt->len_cvt * 4)) - 1;
  76. int i;
  77. LOG_DEBUG_CONVERT("AUDIO_U8", "AUDIO_F32");
  78. for (i = cvt->len_cvt; i; --i, --src, --dst) {
  79. *dst = (((float) *src) * DIVBY128) - 1.0f;
  80. }
  81. cvt->len_cvt *= 4;
  82. if (cvt->filters[++cvt->filter_index]) {
  83. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  84. }
  85. }
  86. static void SDLCALL
  87. SDL_Convert_S16_to_F32_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  88. {
  89. const Sint16 *src = ((const Sint16 *) (cvt->buf + cvt->len_cvt)) - 1;
  90. float *dst = ((float *) (cvt->buf + cvt->len_cvt * 2)) - 1;
  91. int i;
  92. LOG_DEBUG_CONVERT("AUDIO_S16", "AUDIO_F32");
  93. for (i = cvt->len_cvt / sizeof (Sint16); i; --i, --src, --dst) {
  94. *dst = ((float) *src) * DIVBY32768;
  95. }
  96. cvt->len_cvt *= 2;
  97. if (cvt->filters[++cvt->filter_index]) {
  98. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  99. }
  100. }
  101. static void SDLCALL
  102. SDL_Convert_U16_to_F32_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  103. {
  104. const Uint16 *src = ((const Uint16 *) (cvt->buf + cvt->len_cvt)) - 1;
  105. float *dst = ((float *) (cvt->buf + cvt->len_cvt * 2)) - 1;
  106. int i;
  107. LOG_DEBUG_CONVERT("AUDIO_U16", "AUDIO_F32");
  108. for (i = cvt->len_cvt / sizeof (Uint16); i; --i, --src, --dst) {
  109. *dst = (((float) *src) * DIVBY32768) - 1.0f;
  110. }
  111. cvt->len_cvt *= 2;
  112. if (cvt->filters[++cvt->filter_index]) {
  113. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  114. }
  115. }
  116. static void SDLCALL
  117. SDL_Convert_S32_to_F32_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  118. {
  119. const Sint32 *src = (const Sint32 *) cvt->buf;
  120. float *dst = (float *) cvt->buf;
  121. int i;
  122. LOG_DEBUG_CONVERT("AUDIO_S32", "AUDIO_F32");
  123. for (i = cvt->len_cvt / sizeof (Sint32); i; --i, ++src, ++dst) {
  124. *dst = ((float) (*src>>8)) * DIVBY8388607;
  125. }
  126. if (cvt->filters[++cvt->filter_index]) {
  127. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  128. }
  129. }
  130. static void SDLCALL
  131. SDL_Convert_F32_to_S8_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  132. {
  133. const float *src = (const float *) cvt->buf;
  134. Sint8 *dst = (Sint8 *) cvt->buf;
  135. int i;
  136. LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S8");
  137. for (i = cvt->len_cvt / sizeof (float); i; --i, ++src, ++dst) {
  138. const float sample = *src;
  139. if (sample >= 1.0f) {
  140. *dst = 127;
  141. } else if (sample <= -1.0f) {
  142. *dst = -128;
  143. } else {
  144. *dst = (Sint8)(sample * 127.0f);
  145. }
  146. }
  147. cvt->len_cvt /= 4;
  148. if (cvt->filters[++cvt->filter_index]) {
  149. cvt->filters[cvt->filter_index](cvt, AUDIO_S8);
  150. }
  151. }
  152. static void SDLCALL
  153. SDL_Convert_F32_to_U8_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  154. {
  155. const float *src = (const float *) cvt->buf;
  156. Uint8 *dst = (Uint8 *) cvt->buf;
  157. int i;
  158. LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_U8");
  159. for (i = cvt->len_cvt / sizeof (float); i; --i, ++src, ++dst) {
  160. const float sample = *src;
  161. if (sample >= 1.0f) {
  162. *dst = 255;
  163. } else if (sample <= -1.0f) {
  164. *dst = 0;
  165. } else {
  166. *dst = (Uint8)((sample + 1.0f) * 127.0f);
  167. }
  168. }
  169. cvt->len_cvt /= 4;
  170. if (cvt->filters[++cvt->filter_index]) {
  171. cvt->filters[cvt->filter_index](cvt, AUDIO_U8);
  172. }
  173. }
  174. static void SDLCALL
  175. SDL_Convert_F32_to_S16_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  176. {
  177. const float *src = (const float *) cvt->buf;
  178. Sint16 *dst = (Sint16 *) cvt->buf;
  179. int i;
  180. LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S16");
  181. for (i = cvt->len_cvt / sizeof (float); i; --i, ++src, ++dst) {
  182. const float sample = *src;
  183. if (sample >= 1.0f) {
  184. *dst = 32767;
  185. } else if (sample <= -1.0f) {
  186. *dst = -32768;
  187. } else {
  188. *dst = (Sint16)(sample * 32767.0f);
  189. }
  190. }
  191. cvt->len_cvt /= 2;
  192. if (cvt->filters[++cvt->filter_index]) {
  193. cvt->filters[cvt->filter_index](cvt, AUDIO_S16SYS);
  194. }
  195. }
  196. static void SDLCALL
  197. SDL_Convert_F32_to_U16_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  198. {
  199. const float *src = (const float *) cvt->buf;
  200. Uint16 *dst = (Uint16 *) cvt->buf;
  201. int i;
  202. LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_U16");
  203. for (i = cvt->len_cvt / sizeof (float); i; --i, ++src, ++dst) {
  204. const float sample = *src;
  205. if (sample >= 1.0f) {
  206. *dst = 65535;
  207. } else if (sample <= -1.0f) {
  208. *dst = 0;
  209. } else {
  210. *dst = (Uint16)((sample + 1.0f) * 32767.0f);
  211. }
  212. }
  213. cvt->len_cvt /= 2;
  214. if (cvt->filters[++cvt->filter_index]) {
  215. cvt->filters[cvt->filter_index](cvt, AUDIO_U16SYS);
  216. }
  217. }
  218. static void SDLCALL
  219. SDL_Convert_F32_to_S32_Scalar(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  220. {
  221. const float *src = (const float *) cvt->buf;
  222. Sint32 *dst = (Sint32 *) cvt->buf;
  223. int i;
  224. LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S32");
  225. for (i = cvt->len_cvt / sizeof (float); i; --i, ++src, ++dst) {
  226. const float sample = *src;
  227. if (sample >= 1.0f) {
  228. *dst = 2147483647;
  229. } else if (sample <= -1.0f) {
  230. *dst = (Sint32) -2147483648LL;
  231. } else {
  232. *dst = ((Sint32)(sample * 8388607.0f)) << 8;
  233. }
  234. }
  235. if (cvt->filters[++cvt->filter_index]) {
  236. cvt->filters[cvt->filter_index](cvt, AUDIO_S32SYS);
  237. }
  238. }
  239. #endif
  240. #if HAVE_SSE2_INTRINSICS
/* AUDIO_S8 -> AUDIO_F32, SSE2 version.  In-place: the buffer grows 4x,
   so the conversion walks back-to-front.  A scalar loop peels samples
   until dst is 16-byte aligned, the SIMD loop then converts 16 samples
   per iteration, and a scalar tail finishes any leftovers (or the whole
   job when src turned out misaligned). */
static void SDLCALL
SDL_Convert_S8_to_F32_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Sint8 *src = ((const Sint8 *) (cvt->buf + cvt->len_cvt)) - 1;
    float *dst = ((float *) (cvt->buf + cvt->len_cvt * 4)) - 1;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_S8", "AUDIO_F32 (using SSE2)");

    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    /* (dst-15 is where the next 16-float store block would start.) */
    for (i = cvt->len_cvt; i && (((size_t) (dst-15)) & 15); --i, --src, --dst) {
        *dst = ((float) *src) * DIVBY128;
    }

    src -= 15; dst -= 15;  /* adjust to read SSE blocks from the start. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128i *mmsrc = (const __m128i *) src;
        const __m128i zero = _mm_setzero_si128();
        const __m128 divby128 = _mm_set1_ps(DIVBY128);
        while (i >= 16) {  /* 16 * 8-bit */
            const __m128i bytes = _mm_load_si128(mmsrc);  /* get 16 sint8 into an XMM register. */
            /* treat as int16, shift left to clear every other sint16, then back right with sign-extend. Now sint16. */
            const __m128i shorts1 = _mm_srai_epi16(_mm_slli_epi16(bytes, 8), 8);
            /* right-shift-sign-extend gets us sint16 with the other set of values. */
            const __m128i shorts2 = _mm_srai_epi16(bytes, 8);
            /* unpack against zero to make these int32, shift to make them sign-extend, convert to float, multiply. Whew! */
            const __m128 floats1 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpacklo_epi16(shorts1, zero), 16), 16)), divby128);
            const __m128 floats2 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpacklo_epi16(shorts2, zero), 16), 16)), divby128);
            const __m128 floats3 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpackhi_epi16(shorts1, zero), 16), 16)), divby128);
            const __m128 floats4 = _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_slli_epi32(_mm_unpackhi_epi16(shorts2, zero), 16), 16)), divby128);
            /* Interleave back into correct order, store. */
            _mm_store_ps(dst, _mm_unpacklo_ps(floats1, floats2));
            _mm_store_ps(dst+4, _mm_unpackhi_ps(floats1, floats2));
            _mm_store_ps(dst+8, _mm_unpacklo_ps(floats3, floats4));
            _mm_store_ps(dst+12, _mm_unpackhi_ps(floats3, floats4));
            i -= 16; mmsrc--; dst -= 16;
        }

        src = (const Sint8 *) mmsrc;
    }

    src += 15; dst += 15;  /* adjust for any scalar finishing. */

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float) *src) * DIVBY128;
        i--; src--; dst--;
    }

    cvt->len_cvt *= 4;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
/* AUDIO_U8 -> AUDIO_F32, SSE2 version.  Same back-to-front structure as
   the S8 variant (buffer grows 4x): scalar peel to align dst, SIMD loop
   doing 16 samples per iteration, scalar tail.  Unsigned samples are
   zero-extended and recentered by adding -1.0f after scaling. */
static void SDLCALL
SDL_Convert_U8_to_F32_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Uint8 *src = ((const Uint8 *) (cvt->buf + cvt->len_cvt)) - 1;
    float *dst = ((float *) (cvt->buf + cvt->len_cvt * 4)) - 1;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_U8", "AUDIO_F32 (using SSE2)");

    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = cvt->len_cvt; i && (((size_t) (dst-15)) & 15); --i, --src, --dst) {
        *dst = (((float) *src) * DIVBY128) - 1.0f;
    }

    src -= 15; dst -= 15;  /* adjust to read SSE blocks from the start. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128i *mmsrc = (const __m128i *) src;
        const __m128i zero = _mm_setzero_si128();
        const __m128 divby128 = _mm_set1_ps(DIVBY128);
        const __m128 minus1 = _mm_set1_ps(-1.0f);
        while (i >= 16) {  /* 16 * 8-bit */
            const __m128i bytes = _mm_load_si128(mmsrc);  /* get 16 uint8 into an XMM register. */
            /* treat as int16, shift left to clear every other sint16, then back right with zero-extend. Now uint16. */
            const __m128i shorts1 = _mm_srli_epi16(_mm_slli_epi16(bytes, 8), 8);
            /* right-shift-zero-extend gets us uint16 with the other set of values. */
            const __m128i shorts2 = _mm_srli_epi16(bytes, 8);
            /* unpack against zero to make these int32, convert to float, multiply, add. Whew! */
            /* Note that AVX2 can do floating point multiply+add in one instruction, fwiw. SSE2 cannot. */
            const __m128 floats1 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(shorts1, zero)), divby128), minus1);
            const __m128 floats2 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi16(shorts2, zero)), divby128), minus1);
            const __m128 floats3 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(shorts1, zero)), divby128), minus1);
            const __m128 floats4 = _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi16(shorts2, zero)), divby128), minus1);
            /* Interleave back into correct order, store. */
            _mm_store_ps(dst, _mm_unpacklo_ps(floats1, floats2));
            _mm_store_ps(dst+4, _mm_unpackhi_ps(floats1, floats2));
            _mm_store_ps(dst+8, _mm_unpacklo_ps(floats3, floats4));
            _mm_store_ps(dst+12, _mm_unpackhi_ps(floats3, floats4));
            i -= 16; mmsrc--; dst -= 16;
        }

        src = (const Uint8 *) mmsrc;
    }

    src += 15; dst += 15;  /* adjust for any scalar finishing. */

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = (((float) *src) * DIVBY128) - 1.0f;
        i--; src--; dst--;
    }

    cvt->len_cvt *= 4;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
/* AUDIO_S16 -> AUDIO_F32, SSE2 version.  In-place: the buffer grows 2x,
   so the walk is back-to-front.  Scalar peel aligns dst, the SIMD loop
   converts 8 samples per iteration, and a scalar tail mops up. */
static void SDLCALL
SDL_Convert_S16_to_F32_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Sint16 *src = ((const Sint16 *) (cvt->buf + cvt->len_cvt)) - 1;
    float *dst = ((float *) (cvt->buf + cvt->len_cvt * 2)) - 1;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_S16", "AUDIO_F32 (using SSE2)");

    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = cvt->len_cvt / sizeof (Sint16); i && (((size_t) (dst-7)) & 15); --i, --src, --dst) {
        *dst = ((float) *src) * DIVBY32768;
    }

    src -= 7; dst -= 7;  /* adjust to read SSE blocks from the start. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 divby32768 = _mm_set1_ps(DIVBY32768);
        while (i >= 8) {  /* 8 * 16-bit */
            const __m128i ints = _mm_load_si128((__m128i const *) src);  /* get 8 sint16 into an XMM register. */
            /* treat as int32, shift left to clear every other sint16, then back right with sign-extend. Now sint32. */
            const __m128i a = _mm_srai_epi32(_mm_slli_epi32(ints, 16), 16);
            /* right-shift-sign-extend gets us sint32 with the other set of values. */
            const __m128i b = _mm_srai_epi32(ints, 16);
            /* Interleave these back into the right order, convert to float, multiply, store. */
            _mm_store_ps(dst, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi32(a, b)), divby32768));
            _mm_store_ps(dst+4, _mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi32(a, b)), divby32768));
            i -= 8; src -= 8; dst -= 8;
        }
    }

    src += 7; dst += 7;  /* adjust for any scalar finishing. */

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float) *src) * DIVBY32768;
        i--; src--; dst--;
    }

    cvt->len_cvt *= 2;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
  383. static void SDLCALL
  384. SDL_Convert_U16_to_F32_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  385. {
  386. const Uint16 *src = ((const Uint16 *) (cvt->buf + cvt->len_cvt)) - 1;
  387. float *dst = ((float *) (cvt->buf + cvt->len_cvt * 2)) - 1;
  388. int i;
  389. LOG_DEBUG_CONVERT("AUDIO_U16", "AUDIO_F32 (using SSE2)");
  390. /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
  391. for (i = cvt->len_cvt / sizeof (Sint16); i && (((size_t) (dst-7)) & 15); --i, --src, --dst) {
  392. *dst = (((float) *src) * DIVBY32768) - 1.0f;
  393. }
  394. src -= 7; dst -= 7; /* adjust to read SSE blocks from the start. */
  395. SDL_assert(!i || ((((size_t) dst) & 15) == 0));
  396. /* Make sure src is aligned too. */
  397. if ((((size_t) src) & 15) == 0) {
  398. /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
  399. const __m128 divby32768 = _mm_set1_ps(DIVBY32768);
  400. const __m128 minus1 = _mm_set1_ps(-1.0f);
  401. while (i >= 8) { /* 8 * 16-bit */
  402. const __m128i ints = _mm_load_si128((__m128i const *) src); /* get 8 sint16 into an XMM register. */
  403. /* treat as int32, shift left to clear every other sint16, then back right with zero-extend. Now sint32. */
  404. const __m128i a = _mm_srli_epi32(_mm_slli_epi32(ints, 16), 16);
  405. /* right-shift-sign-extend gets us sint32 with the other set of values. */
  406. const __m128i b = _mm_srli_epi32(ints, 16);
  407. /* Interleave these back into the right order, convert to float, multiply, store. */
  408. _mm_store_ps(dst, _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpacklo_epi32(a, b)), divby32768), minus1));
  409. _mm_store_ps(dst+4, _mm_add_ps(_mm_mul_ps(_mm_cvtepi32_ps(_mm_unpackhi_epi32(a, b)), divby32768), minus1));
  410. i -= 8; src -= 8; dst -= 8;
  411. }
  412. }
  413. src += 7; dst += 7; /* adjust for any scalar finishing. */
  414. /* Finish off any leftovers with scalar operations. */
  415. while (i) {
  416. *dst = (((float) *src) * DIVBY32768) - 1.0f;
  417. i--; src--; dst--;
  418. }
  419. cvt->len_cvt *= 2;
  420. if (cvt->filters[++cvt->filter_index]) {
  421. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  422. }
  423. }
/* AUDIO_S32 -> AUDIO_F32, SSE2 version.  Same element size, so the walk
   is front-to-back and len_cvt is unchanged: scalar peel to align dst,
   SIMD loop doing 4 samples per iteration, scalar tail. */
static void SDLCALL
SDL_Convert_S32_to_F32_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Sint32 *src = (const Sint32 *) cvt->buf;
    float *dst = (float *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_S32", "AUDIO_F32 (using SSE2)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (Sint32); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        *dst = ((float) (*src>>8)) * DIVBY8388607;
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 divby8388607 = _mm_set1_ps(DIVBY8388607);
        const __m128i *mmsrc = (const __m128i *) src;
        while (i >= 4) {  /* 4 * sint32 */
            /* shift out lowest bits so int fits in a float32. Small precision loss, but much faster. */
            _mm_store_ps(dst, _mm_mul_ps(_mm_cvtepi32_ps(_mm_srai_epi32(_mm_load_si128(mmsrc), 8)), divby8388607));
            i -= 4; mmsrc++; dst += 4;
        }
        src = (const Sint32 *) mmsrc;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float) (*src>>8)) * DIVBY8388607;
        i--; src++; dst++;
    }

    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
/* AUDIO_F32 -> AUDIO_S8, SSE2 version.  In-place shrink to a quarter of
   the size, walking front-to-back: scalar peel aligns dst, the SIMD loop
   clamps/scales/packs 16 floats down to 16 sint8 per iteration, and a
   scalar tail finishes. */
static void SDLCALL
SDL_Convert_F32_to_S8_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Sint8 *dst = (Sint8 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S8 (using SSE2)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby127 = _mm_set1_ps(127.0f);
        __m128i *mmdst = (__m128i *) dst;
        while (i >= 16) {  /* 16 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+4)), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+8)), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+12)), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            _mm_store_si128(mmdst, _mm_packs_epi16(_mm_packs_epi32(ints1, ints2), _mm_packs_epi32(ints3, ints4)));  /* pack down, store out. */
            i -= 16; src += 16; mmdst++;
        }
        dst = (Sint8 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 4;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_S8);
    }
}
/* AUDIO_F32 -> AUDIO_U8, SSE2 version.  In-place shrink to a quarter of
   the size, walking front-to-back: scalar peel aligns dst, the SIMD loop
   clamps, shifts into unsigned range, and packs 16 floats down to 16
   uint8 per iteration (packus handles the unsigned saturation), then a
   scalar tail finishes. */
static void SDLCALL
SDL_Convert_F32_to_U8_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Uint8 *dst = cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_U8 (using SSE2)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 255;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint8)((sample + 1.0f) * 127.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby127 = _mm_set1_ps(127.0f);
        __m128i *mmdst = (__m128i *) dst;
        while (i >= 16) {  /* 16 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+4)), one), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+8)), one), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_add_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+12)), one), one), mulby127));  /* load 4 floats, clamp, convert to sint32 */
            _mm_store_si128(mmdst, _mm_packus_epi16(_mm_packs_epi32(ints1, ints2), _mm_packs_epi32(ints3, ints4)));  /* pack down, store out. */
            i -= 16; src += 16; mmdst++;
        }
        dst = (Uint8 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 255;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint8)((sample + 1.0f) * 127.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 4;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_U8);
    }
}
/* AUDIO_F32 -> AUDIO_S16, SSE2 version.  In-place shrink to half the
   size, walking front-to-back: scalar peel aligns dst, the SIMD loop
   clamps/scales 8 floats and packs them to 8 sint16 per iteration
   (packs_epi32 provides signed saturation), then a scalar tail
   finishes. */
static void SDLCALL
SDL_Convert_F32_to_S16_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Sint16 *dst = (Sint16 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S16 (using SSE2)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 32767;
        } else if (sample <= -1.0f) {
            *dst = -32768;
        } else {
            *dst = (Sint16)(sample * 32767.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby32767 = _mm_set1_ps(32767.0f);
        __m128i *mmdst = (__m128i *) dst;
        while (i >= 8) {  /* 8 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby32767));  /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+4)), one), mulby32767));  /* load 4 floats, clamp, convert to sint32 */
            _mm_store_si128(mmdst, _mm_packs_epi32(ints1, ints2));  /* pack to sint16, store out. */
            i -= 8; src += 8; mmdst++;
        }
        dst = (Sint16 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 32767;
        } else if (sample <= -1.0f) {
            *dst = -32768;
        } else {
            *dst = (Sint16)(sample * 32767.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 2;
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_S16SYS);
    }
}
/* Convert float32 samples to uint16 in place, using SSE2.
   Maps [-1.0, 1.0] to [0, 65535] (offset-binary); buffer shrinks to half
   its byte length. */
static void SDLCALL
SDL_Convert_F32_to_U16_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Uint16 *dst = (Uint16 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_U16 (using SSE2)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 65535;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint16)((sample + 1.0f) * 32767.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */

        /* This calculates differently than the scalar path because SSE2 can't
           pack int32 data down to unsigned int16. _mm_packs_epi32 does signed
           saturation, so that would corrupt our data. _mm_packus_epi32 exists,
           but not before SSE 4.1. So we convert from float to sint16, packing
           that down with legit signed saturation, and then xor the top bit
           against 1. This results in the correct unsigned 16-bit value, even
           though it looks like dark magic. */
        const __m128 mulby32767 = _mm_set1_ps(32767.0f);
        const __m128i topbit = _mm_set1_epi16(-32768);   /* 0x8000 in each lane; xor flips sint16 to offset-binary uint16. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        __m128i *mmdst = (__m128i *) dst;
        while (i >= 8) {   /* 8 * float32 */
            const __m128i ints1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
            const __m128i ints2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src+4)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
            _mm_store_si128(mmdst, _mm_xor_si128(_mm_packs_epi32(ints1, ints2), topbit)); /* pack to sint16, xor top bit, store out. */
            i -= 8; src += 8; mmdst++;
        }
        dst = (Uint16 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 65535;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint16)((sample + 1.0f) * 32767.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 2;
    /* Hand the (now U16) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_U16SYS);
    }
}
/* Convert float32 samples to sint32 in place, using SSE2.
   Scales by 8388607 (24-bit full scale) then shifts left 8, giving 24 bits
   of precision in a 32-bit container. Element sizes match (4 bytes each),
   so the buffer length is unchanged and no len_cvt adjustment is needed. */
static void SDLCALL
SDL_Convert_F32_to_S32_SSE2(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Sint32 *dst = (Sint32 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S32 (using SSE2)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 2147483647;
        } else if (sample <= -1.0f) {
            *dst = (Sint32) -2147483648LL;
        } else {
            *dst = ((Sint32)(sample * 8388607.0f)) << 8;
        }
    }

    /* src and dst advance in lockstep over same-size (4-byte) elements, so
       aligning dst aligns src as well — hence both asserts, and no separate
       unaligned-src fallback like the narrowing converters need. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));
    SDL_assert(!i || ((((size_t) src) & 15) == 0));

    {
        /* Aligned! Do SSE blocks as long as we have 16 bytes available. */
        const __m128 one = _mm_set1_ps(1.0f);
        const __m128 negone = _mm_set1_ps(-1.0f);
        const __m128 mulby8388607 = _mm_set1_ps(8388607.0f);
        __m128i *mmdst = (__m128i *) dst;
        while (i >= 4) {   /* 4 * float32 */
            _mm_store_si128(mmdst, _mm_slli_epi32(_mm_cvtps_epi32(_mm_mul_ps(_mm_min_ps(_mm_max_ps(negone, _mm_load_ps(src)), one), mulby8388607)), 8)); /* load 4 floats, clamp, convert to sint32 */
            i -= 4; src += 4; mmdst++;
        }
        dst = (Sint32 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 2147483647;
        } else if (sample <= -1.0f) {
            *dst = (Sint32) -2147483648LL;
        } else {
            *dst = ((Sint32)(sample * 8388607.0f)) << 8;
        }
        i--; src++; dst++;
    }

    /* Hand the (now S32) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_S32SYS);
    }
}
  721. #endif
  722. #if HAVE_NEON_INTRINSICS
/* Convert sint8 samples to float32 in place, using NEON.
   The buffer grows 4x, so the conversion walks BACKWARDS from the last
   sample toward the first: that way each float is written to memory that
   no longer holds unread sint8 input. len_cvt is multiplied by 4 at the end. */
static void SDLCALL
SDL_Convert_S8_to_F32_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Sint8 *src = ((const Sint8 *) (cvt->buf + cvt->len_cvt)) - 1;   /* last input sample. */
    float *dst = ((float *) (cvt->buf + cvt->len_cvt * 4)) - 1;           /* last output slot. */
    int i;

    LOG_DEBUG_CONVERT("AUDIO_S8", "AUDIO_F32 (using NEON)");

    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    /* (dst-15) is where a 16-float NEON block ending at dst would start; loop scalar until that is 16-byte aligned. */
    for (i = cvt->len_cvt; i && (((size_t) (dst-15)) & 15); --i, --src, --dst) {
        *dst = ((float) *src) * DIVBY128;
    }

    src -= 15; dst -= 15;  /* adjust to read NEON blocks from the start. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const int8_t *mmsrc = (const int8_t *) src;
        const float32x4_t divby128 = vdupq_n_f32(DIVBY128);
        while (i >= 16) {   /* 16 * 8-bit */
            const int8x16_t bytes = vld1q_s8(mmsrc);  /* get 16 sint8 into a NEON register. */
            const int16x8_t int16hi = vmovl_s8(vget_high_s8(bytes));  /* convert top 8 bytes to 8 int16 */
            const int16x8_t int16lo = vmovl_s8(vget_low_s8(bytes));   /* convert bottom 8 bytes to 8 int16 */
            /* split int16 to two int32, then convert to float, then multiply to normalize, store. */
            vst1q_f32(dst, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(int16lo))), divby128));
            vst1q_f32(dst+4, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(int16lo))), divby128));
            vst1q_f32(dst+8, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(int16hi))), divby128));
            vst1q_f32(dst+12, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(int16hi))), divby128));
            i -= 16; mmsrc -= 16; dst -= 16;   /* blocks are read forward but stepped backward. */
        }
        src = (const Sint8 *) mmsrc;
    }

    src += 15; dst += 15;  /* adjust for any scalar finishing. */

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float) *src) * DIVBY128;
        i--; src--; dst--;
    }

    cvt->len_cvt *= 4;
    /* Hand the (now F32) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
/* Convert uint8 samples to float32 in place, using NEON.
   Maps [0, 255] to roughly [-1.0, 1.0] via (x / 128) - 1. Buffer grows 4x,
   so conversion walks backwards from the end (see S8 variant for the idea).
   len_cvt is multiplied by 4 at the end. */
static void SDLCALL
SDL_Convert_U8_to_F32_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Uint8 *src = ((const Uint8 *) (cvt->buf + cvt->len_cvt)) - 1;   /* last input sample. */
    float *dst = ((float *) (cvt->buf + cvt->len_cvt * 4)) - 1;           /* last output slot. */
    int i;

    LOG_DEBUG_CONVERT("AUDIO_U8", "AUDIO_F32 (using NEON)");

    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    for (i = cvt->len_cvt; i && (((size_t) (dst-15)) & 15); --i, --src, --dst) {
        *dst = (((float) *src) * DIVBY128) - 1.0f;
    }

    src -= 15; dst -= 15;  /* adjust to read NEON blocks from the start. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const uint8_t *mmsrc = (const uint8_t *) src;
        const float32x4_t divby128 = vdupq_n_f32(DIVBY128);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        while (i >= 16) {   /* 16 * 8-bit */
            const uint8x16_t bytes = vld1q_u8(mmsrc);  /* get 16 uint8 into a NEON register. */
            const uint16x8_t uint16hi = vmovl_u8(vget_high_u8(bytes));  /* convert top 8 bytes to 8 uint16 */
            const uint16x8_t uint16lo = vmovl_u8(vget_low_u8(bytes));   /* convert bottom 8 bytes to 8 uint16 */
            /* split uint16 to two uint32, then convert to float, then multiply to normalize, subtract to adjust for sign, store.
               vmlaq_f32(negone, x, divby128) computes x * divby128 + (-1) in one fused step. */
            vst1q_f32(dst, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_low_u16(uint16lo))), divby128));
            vst1q_f32(dst+4, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_high_u16(uint16lo))), divby128));
            vst1q_f32(dst+8, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_low_u16(uint16hi))), divby128));
            vst1q_f32(dst+12, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_high_u16(uint16hi))), divby128));
            i -= 16; mmsrc -= 16; dst -= 16;
        }
        src = (const Uint8 *) mmsrc;
    }

    src += 15; dst += 15;  /* adjust for any scalar finishing. */

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = (((float) *src) * DIVBY128) - 1.0f;
        i--; src--; dst--;
    }

    cvt->len_cvt *= 4;
    /* Hand the (now F32) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
/* Convert sint16 samples to float32 in place, using NEON.
   Buffer grows 2x, so conversion walks backwards from the end.
   len_cvt is multiplied by 2 at the end. */
static void SDLCALL
SDL_Convert_S16_to_F32_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Sint16 *src = ((const Sint16 *) (cvt->buf + cvt->len_cvt)) - 1;  /* last input sample. */
    float *dst = ((float *) (cvt->buf + cvt->len_cvt * 2)) - 1;            /* last output slot. */
    int i;

    LOG_DEBUG_CONVERT("AUDIO_S16", "AUDIO_F32 (using NEON)");

    /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
    /* (dst-7) is where an 8-float... wait, an 8-sample NEON block ending at dst would start;
       NOTE(review): block is 8 samples = 32 output bytes; loop until that start is 16-byte aligned. */
    for (i = cvt->len_cvt / sizeof (Sint16); i && (((size_t) (dst-7)) & 15); --i, --src, --dst) {
        *dst = ((float) *src) * DIVBY32768;
    }

    src -= 7; dst -= 7;  /* adjust to read NEON blocks from the start. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t divby32768 = vdupq_n_f32(DIVBY32768);
        while (i >= 8) {   /* 8 * 16-bit */
            const int16x8_t ints = vld1q_s16((int16_t const *) src);  /* get 8 sint16 into a NEON register. */
            /* split int16 to two int32, then convert to float, then multiply to normalize, store. */
            vst1q_f32(dst, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(ints))), divby32768));
            vst1q_f32(dst+4, vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(ints))), divby32768));
            i -= 8; src -= 8; dst -= 8;
        }
    }

    src += 7; dst += 7;  /* adjust for any scalar finishing. */

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float) *src) * DIVBY32768;
        i--; src--; dst--;
    }

    cvt->len_cvt *= 2;
    /* Hand the (now F32) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
  844. static void SDLCALL
  845. SDL_Convert_U16_to_F32_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
  846. {
  847. const Uint16 *src = ((const Uint16 *) (cvt->buf + cvt->len_cvt)) - 1;
  848. float *dst = ((float *) (cvt->buf + cvt->len_cvt * 2)) - 1;
  849. int i;
  850. LOG_DEBUG_CONVERT("AUDIO_U16", "AUDIO_F32 (using NEON)");
  851. /* Get dst aligned to 16 bytes (since buffer is growing, we don't have to worry about overreading from src) */
  852. for (i = cvt->len_cvt / sizeof (Sint16); i && (((size_t) (dst-7)) & 15); --i, --src, --dst) {
  853. *dst = (((float) *src) * DIVBY32768) - 1.0f;
  854. }
  855. src -= 7; dst -= 7; /* adjust to read NEON blocks from the start. */
  856. SDL_assert(!i || ((((size_t) dst) & 15) == 0));
  857. /* Make sure src is aligned too. */
  858. if ((((size_t) src) & 15) == 0) {
  859. /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
  860. const float32x4_t divby32768 = vdupq_n_f32(DIVBY32768);
  861. const float32x4_t negone = vdupq_n_f32(-1.0f);
  862. while (i >= 8) { /* 8 * 16-bit */
  863. const uint16x8_t uints = vld1q_u16((uint16_t const *) src); /* get 8 uint16 into a NEON register. */
  864. /* split uint16 to two int32, then convert to float, then multiply to normalize, subtract for sign, store. */
  865. vst1q_f32(dst, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_low_u16(uints))), divby32768));
  866. vst1q_f32(dst+4, vmlaq_f32(negone, vcvtq_f32_u32(vmovl_u16(vget_high_u16(uints))), divby32768));
  867. i -= 8; src -= 8; dst -= 8;
  868. }
  869. }
  870. src += 7; dst += 7; /* adjust for any scalar finishing. */
  871. /* Finish off any leftovers with scalar operations. */
  872. while (i) {
  873. *dst = (((float) *src) * DIVBY32768) - 1.0f;
  874. i--; src--; dst--;
  875. }
  876. cvt->len_cvt *= 2;
  877. if (cvt->filters[++cvt->filter_index]) {
  878. cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
  879. }
  880. }
/* Convert sint32 samples to float32 in place, using NEON.
   Discards the low 8 bits (>> 8) so the 24-bit value converts to float
   exactly, then normalizes by DIVBY8388607. Element sizes match, so the
   walk goes forward and len_cvt is unchanged. */
static void SDLCALL
SDL_Convert_S32_to_F32_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const Sint32 *src = (const Sint32 *) cvt->buf;
    float *dst = (float *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_S32", "AUDIO_F32 (using NEON)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (Sint32); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        *dst = ((float) (*src>>8)) * DIVBY8388607;
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t divby8388607 = vdupq_n_f32(DIVBY8388607);
        const int32_t *mmsrc = (const int32_t *) src;
        while (i >= 4) {   /* 4 * sint32 */
            /* shift out lowest bits so int fits in a float32. Small precision loss, but much faster. */
            vst1q_f32(dst, vmulq_f32(vcvtq_f32_s32(vshrq_n_s32(vld1q_s32(mmsrc), 8)), divby8388607));
            i -= 4; mmsrc += 4; dst += 4;
        }
        src = (const Sint32 *) mmsrc;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        *dst = ((float) (*src>>8)) * DIVBY8388607;
        i--; src++; dst++;
    }

    /* Hand the (now F32) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_F32SYS);
    }
}
/* Convert float32 samples to sint8 in place, using NEON.
   Clamps to [-1.0, 1.0] and scales by 127; buffer shrinks to a quarter of
   its byte length (cvt->len_cvt /= 4). */
static void SDLCALL
SDL_Convert_F32_to_S8_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Sint8 *dst = (Sint8 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S8 (using NEON)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t one = vdupq_n_f32(1.0f);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        const float32x4_t mulby127 = vdupq_n_f32(127.0f);
        int8_t *mmdst = (int8_t *) dst;
        while (i >= 16) {   /* 16 * float32 */
            const int32x4_t ints1 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints2 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+4)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints3 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+8)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints4 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+12)), one), mulby127)); /* load 4 floats, clamp, convert to sint32 */
            /* vmovn truncates rather than saturates, but the clamp above keeps
               every value inside [-127, 127], so truncation is lossless here. */
            const int8x8_t i8lo = vmovn_s16(vcombine_s16(vmovn_s32(ints1), vmovn_s32(ints2))); /* narrow to sint16, combine, narrow to sint8 */
            const int8x8_t i8hi = vmovn_s16(vcombine_s16(vmovn_s32(ints3), vmovn_s32(ints4))); /* narrow to sint16, combine, narrow to sint8 */
            vst1q_s8(mmdst, vcombine_s8(i8lo, i8hi));  /* combine to int8x16_t, store out */
            i -= 16; src += 16; mmdst += 16;
        }
        dst = (Sint8 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 127;
        } else if (sample <= -1.0f) {
            *dst = -128;
        } else {
            *dst = (Sint8)(sample * 127.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 4;
    /* Hand the (now S8) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_S8);
    }
}
/* Convert float32 samples to uint8 in place, using NEON.
   Maps [-1.0, 1.0] to [0, 255] via (x + 1) * 127 after clamping; buffer
   shrinks to a quarter of its byte length (cvt->len_cvt /= 4). */
static void SDLCALL
SDL_Convert_F32_to_U8_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Uint8 *dst = (Uint8 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_U8 (using NEON)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 255;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint8)((sample + 1.0f) * 127.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t one = vdupq_n_f32(1.0f);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        const float32x4_t mulby127 = vdupq_n_f32(127.0f);
        uint8_t *mmdst = (uint8_t *) dst;
        while (i >= 16) {   /* 16 * float32 */
            /* Clamp to [-1, 1], add 1 so the value is non-negative, then scale:
               result is in [0, 254], safe for the unsigned convert and narrows below. */
            const uint32x4_t uints1 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
            const uint32x4_t uints2 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+4)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
            const uint32x4_t uints3 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+8)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
            const uint32x4_t uints4 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+12)), one), one), mulby127)); /* load 4 floats, clamp, convert to uint32 */
            const uint8x8_t ui8lo = vmovn_u16(vcombine_u16(vmovn_u32(uints1), vmovn_u32(uints2))); /* narrow to uint16, combine, narrow to uint8 */
            const uint8x8_t ui8hi = vmovn_u16(vcombine_u16(vmovn_u32(uints3), vmovn_u32(uints4))); /* narrow to uint16, combine, narrow to uint8 */
            vst1q_u8(mmdst, vcombine_u8(ui8lo, ui8hi));  /* combine to uint8x16_t, store out */
            i -= 16; src += 16; mmdst += 16;
        }
        dst = (Uint8 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 255;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint8)((sample + 1.0f) * 127.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 4;
    /* Hand the (now U8) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_U8);
    }
}
/* Convert float32 samples to sint16 in place, using NEON.
   Clamps to [-1.0, 1.0] and scales by 32767; buffer shrinks to half its
   byte length (cvt->len_cvt /= 2). */
static void SDLCALL
SDL_Convert_F32_to_S16_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Sint16 *dst = (Sint16 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S16 (using NEON)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 32767;
        } else if (sample <= -1.0f) {
            *dst = -32768;
        } else {
            *dst = (Sint16)(sample * 32767.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t one = vdupq_n_f32(1.0f);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        const float32x4_t mulby32767 = vdupq_n_f32(32767.0f);
        int16_t *mmdst = (int16_t *) dst;
        while (i >= 8) {   /* 8 * float32 */
            /* NOTE(review): the vector path clamps then multiplies, so a full-scale
               negative sample yields -32767, while the scalar path writes -32768
               for sample <= -1.0f — a one-LSB difference between the two paths.
               Clamping also keeps values inside sint16 range, so truncating vmovn is safe. */
            const int32x4_t ints1 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
            const int32x4_t ints2 = vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+4)), one), mulby32767)); /* load 4 floats, clamp, convert to sint32 */
            vst1q_s16(mmdst, vcombine_s16(vmovn_s32(ints1), vmovn_s32(ints2))); /* narrow to sint16, combine, store out. */
            i -= 8; src += 8; mmdst += 8;
        }
        dst = (Sint16 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 32767;
        } else if (sample <= -1.0f) {
            *dst = -32768;
        } else {
            *dst = (Sint16)(sample * 32767.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 2;
    /* Hand the (now S16) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_S16SYS);
    }
}
/* Convert float32 samples to uint16 in place, using NEON.
   Maps [-1.0, 1.0] to [0, 65535] via (x + 1) * 32767 after clamping;
   buffer shrinks to half its byte length (cvt->len_cvt /= 2). */
static void SDLCALL
SDL_Convert_F32_to_U16_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Uint16 *dst = (Uint16 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_U16 (using NEON)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 65535;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint16)((sample + 1.0f) * 32767.0f);
        }
    }

    SDL_assert(!i || ((((size_t) dst) & 15) == 0));

    /* Make sure src is aligned too. */
    if ((((size_t) src) & 15) == 0) {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t one = vdupq_n_f32(1.0f);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        const float32x4_t mulby32767 = vdupq_n_f32(32767.0f);
        uint16_t *mmdst = (uint16_t *) dst;
        while (i >= 8) {   /* 8 * float32 */
            /* Clamp, add 1 so values are non-negative ([0, 2] * 32767 = [0, 65534]),
               which keeps the unsigned convert and truncating narrow below safe. */
            const uint32x4_t uints1 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), one), mulby32767)); /* load 4 floats, clamp, convert to uint32 */
            const uint32x4_t uints2 = vcvtq_u32_f32(vmulq_f32(vaddq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src+4)), one), one), mulby32767)); /* load 4 floats, clamp, convert to uint32 */
            vst1q_u16(mmdst, vcombine_u16(vmovn_u32(uints1), vmovn_u32(uints2))); /* narrow to uint16, combine, store out. */
            i -= 8; src += 8; mmdst += 8;
        }
        dst = (Uint16 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 65535;
        } else if (sample <= -1.0f) {
            *dst = 0;
        } else {
            *dst = (Uint16)((sample + 1.0f) * 32767.0f);
        }
        i--; src++; dst++;
    }

    cvt->len_cvt /= 2;
    /* Hand the (now U16) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_U16SYS);
    }
}
/* Convert float32 samples to sint32 in place, using NEON.
   Scales by 8388607 (24-bit full scale) then shifts left 8, giving 24 bits
   of precision in a 32-bit container. Element sizes match, so len_cvt is
   unchanged. */
static void SDLCALL
SDL_Convert_F32_to_S32_NEON(SDL_AudioCVT *cvt, SDL_AudioFormat format)
{
    const float *src = (const float *) cvt->buf;
    Sint32 *dst = (Sint32 *) cvt->buf;
    int i;

    LOG_DEBUG_CONVERT("AUDIO_F32", "AUDIO_S32 (using NEON)");

    /* Get dst aligned to 16 bytes */
    for (i = cvt->len_cvt / sizeof (float); i && (((size_t) dst) & 15); --i, ++src, ++dst) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 2147483647;
        } else if (sample <= -1.0f) {
            *dst = (-2147483647) - 1;   /* INT32_MIN, spelled to avoid the out-of-range literal 2147483648. */
        } else {
            *dst = ((Sint32)(sample * 8388607.0f)) << 8;
        }
    }

    /* src and dst advance in lockstep over same-size (4-byte) elements, so
       aligning dst aligns src as well — hence both asserts, and no separate
       unaligned-src fallback. */
    SDL_assert(!i || ((((size_t) dst) & 15) == 0));
    SDL_assert(!i || ((((size_t) src) & 15) == 0));

    {
        /* Aligned! Do NEON blocks as long as we have 16 bytes available. */
        const float32x4_t one = vdupq_n_f32(1.0f);
        const float32x4_t negone = vdupq_n_f32(-1.0f);
        const float32x4_t mulby8388607 = vdupq_n_f32(8388607.0f);
        int32_t *mmdst = (int32_t *) dst;
        while (i >= 4) {   /* 4 * float32 */
            /* load 4 floats, clamp, scale to 24-bit, convert to sint32, shift into the top 24 bits, store. */
            vst1q_s32(mmdst, vshlq_n_s32(vcvtq_s32_f32(vmulq_f32(vminq_f32(vmaxq_f32(negone, vld1q_f32(src)), one), mulby8388607)), 8));
            i -= 4; src += 4; mmdst += 4;
        }
        dst = (Sint32 *) mmdst;
    }

    /* Finish off any leftovers with scalar operations. */
    while (i) {
        const float sample = *src;
        if (sample >= 1.0f) {
            *dst = 2147483647;
        } else if (sample <= -1.0f) {
            *dst = (-2147483647) - 1;
        } else {
            *dst = ((Sint32)(sample * 8388607.0f)) << 8;
        }
        i--; src++; dst++;
    }

    /* Hand the (now S32) buffer to the next converter in the filter chain, if any. */
    if (cvt->filters[++cvt->filter_index]) {
        cvt->filters[cvt->filter_index](cvt, AUDIO_S32SYS);
    }
}
  1174. #endif
/* Select the type-converter implementations (SSE2, NEON, or scalar) once at
   startup, based on compile-time availability and a runtime CPU check.
   Idempotent: subsequent calls return immediately. */
void SDL_ChooseAudioConverters(void)
{
    static SDL_bool converters_chosen = SDL_FALSE;
    if (converters_chosen) {
        return;   /* already picked on a previous call. */
    }

/* Point every SDL_Convert_* function pointer at the fntype-suffixed variant
   (e.g. SET_CONVERTER_FUNCS(SSE2) selects SDL_Convert_S8_to_F32_SSE2, etc.)
   and mark selection as done. */
#define SET_CONVERTER_FUNCS(fntype) \
        SDL_Convert_S8_to_F32 = SDL_Convert_S8_to_F32_##fntype; \
        SDL_Convert_U8_to_F32 = SDL_Convert_U8_to_F32_##fntype; \
        SDL_Convert_S16_to_F32 = SDL_Convert_S16_to_F32_##fntype; \
        SDL_Convert_U16_to_F32 = SDL_Convert_U16_to_F32_##fntype; \
        SDL_Convert_S32_to_F32 = SDL_Convert_S32_to_F32_##fntype; \
        SDL_Convert_F32_to_S8 = SDL_Convert_F32_to_S8_##fntype; \
        SDL_Convert_F32_to_U8 = SDL_Convert_F32_to_U8_##fntype; \
        SDL_Convert_F32_to_S16 = SDL_Convert_F32_to_S16_##fntype; \
        SDL_Convert_F32_to_U16 = SDL_Convert_F32_to_U16_##fntype; \
        SDL_Convert_F32_to_S32 = SDL_Convert_F32_to_S32_##fntype; \
        converters_chosen = SDL_TRUE

    /* Prefer SIMD variants when both compiled in and supported by this CPU;
       fall back to scalar when available. */
#if HAVE_SSE2_INTRINSICS
    if (SDL_HasSSE2()) {
        SET_CONVERTER_FUNCS(SSE2);
        return;
    }
#endif

#if HAVE_NEON_INTRINSICS
    if (SDL_HasNEON()) {
        SET_CONVERTER_FUNCS(NEON);
        return;
    }
#endif

#if NEED_SCALAR_CONVERTER_FALLBACKS
    SET_CONVERTER_FUNCS(Scalar);
#endif

#undef SET_CONVERTER_FUNCS

    /* If no branch above fired, the build configuration left us with no
       converters at all — that's a configuration error. */
    SDL_assert(converters_chosen == SDL_TRUE);
}
  1211. /* vi: set ts=4 sw=4 expandtab: */