shithub: libvpx

ref: 849b63ffe1723e4e3c021c821cbd50db7cb5211d
parent: 2a9698c1113e209f531fc1cbf814018ab35d1123
author: James Zern <jzern@google.com>
date: Tue Oct 22 15:56:31 EDT 2019

vpx_int_pro_col_sse2: use unaligned loads

this fixes a segfault when scaling is enabled; in some cases, depending
on the scaling ratio, the offsets may become odd.
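
A minimal standalone sketch (not part of the libvpx tree) of the aligned
vs. unaligned load distinction the patch relies on: _mm_load_si128
requires a 16-byte-aligned pointer and typically faults otherwise, while
_mm_loadu_si128 accepts any address, such as a ref pointer at an odd
offset.

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      uint8_t buf[33];
      const uint8_t *ref;
      uint8_t out[16];
      __m128i v;

      memset(buf, 1, sizeof(buf));
      ref = buf + 1; /* deliberately misaligned, like an odd scaling offset */

      /* Unaligned load: valid for any address. */
      v = _mm_loadu_si128((const __m128i *)ref);

      /* By contrast, _mm_load_si128((const __m128i *)ref) requires a
       * 16-byte-aligned address and would typically fault here, which is
       * the reported segfault. */

      _mm_storeu_si128((__m128i *)out, v);
      printf("%u\n", out[0]);
      return 0;
    }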

vpx_int_pro_row_sse2 was updated previously, though the reason wasn't
listed:
54eda13f8 Apply fast motion search to golden reference frame

BUG=webm:1600

Change-Id: I8d5e105d876d8cf917919da301fce362adffab95

--- a/vpx_dsp/x86/avg_intrin_sse2.c
+++ b/vpx_dsp/x86/avg_intrin_sse2.c
@@ -515,7 +515,7 @@
 
 int16_t vpx_int_pro_col_sse2(const uint8_t *ref, const int width) {
   __m128i zero = _mm_setzero_si128();
-  __m128i src_line = _mm_load_si128((const __m128i *)ref);
+  __m128i src_line = _mm_loadu_si128((const __m128i *)ref);
   __m128i s0 = _mm_sad_epu8(src_line, zero);
   __m128i s1;
   int i;
@@ -522,7 +522,7 @@
 
   for (i = 16; i < width; i += 16) {
     ref += 16;
-    src_line = _mm_load_si128((const __m128i *)ref);
+    src_line = _mm_loadu_si128((const __m128i *)ref);
     s1 = _mm_sad_epu8(src_line, zero);
     s0 = _mm_adds_epu16(s0, s1);
   }
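
A scalar sketch (not from the libvpx tree) of what the SSE2 loop above
computes: _mm_sad_epu8 against a zero vector is simply a byte sum, so
the routine reduces the first `width` bytes of `ref` to a single total,
with no alignment requirement on `ref` in the scalar form.

    #include <stdint.h>

    static int16_t int_pro_col_scalar(const uint8_t *ref, int width) {
      int sum = 0;
      int i;
      /* Sum `width` bytes of the row; the SSE2 version does this 16 at a
       * time via _mm_sad_epu8 and _mm_adds_epu16. */
      for (i = 0; i < width; ++i) sum += ref[i];
      return (int16_t)sum;
    }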