ref: 824f1becb09e66d2e38d7b5afeae89f4cec993d0
parent: c64ad657abf4d9f641a3d00cf83b987c55b00d02
author: Jean-Marc Valin <jmvalin@jmvalin.ca>
date: Fri Mar 8 18:28:48 EST 2024
Fix unaligned load with MSVC MSVC doesn't have a real __m128i_u, so it would generate an aligned store, resulting in a segfault. Adding explicit loadu/storeu intrinsics to make sure the compiler generates unaligned load/store
--- a/silk/x86/NSQ_del_dec_avx2.c
+++ b/silk/x86/NSQ_del_dec_avx2.c
@@ -986,7 +986,7 @@
for (i = NSQ->sLTP_shp_buf_idx - psEncC->ltp_mem_length; i < NSQ->sLTP_shp_buf_idx; i+=4)
{__m128i_u* p = (__m128i_u*)&NSQ->sLTP_shp_Q14[i];
- *p = silk_mm_smulww_epi32(*p, gain_adj_Q16);
+ _mm_storeu_si128(p, silk_mm_smulww_epi32(_mm_loadu_si128(p), gain_adj_Q16));
}
/* Scale long-term prediction state */
--
⑨