shithub: libvpx

ref: 2057d3ef757a18e6bb005812a9912748ae4c7610
parent: 7498fe2e542183ff6e8091608ae57fade2bde4ee
author: Johann <johannkoenig@google.com>
date: Fri May 12 14:14:27 EDT 2017

use memcpy for unaligned neon stores

Advise the compiler that the store is eventually going to a uint8_t
buffer. This keeps it from emitting alignment hints that would cause the
memory access to fault when the destination is not 4-byte aligned.

Originally added as a workaround for clang:
https://bugs.llvm.org//show_bug.cgi?id=24421

Change-Id: Ie9854b777cfb2f4baaee66764f0e51dcb094d51e
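
For context on the issue the message describes: casting a uint8_t destination
to uint32_t * (as in the aligned fast path of the removed store4x4() helpers)
tells the compiler the pointer is 4-byte aligned, so it may attach an alignment
hint to the lane store; used on an unaligned destination, such a store faults.
Routing the value through memcpy keeps the destination typed as bytes, so no
hint can be emitted, and a fixed-size 4-byte memcpy still compiles down to a
single store. A minimal sketch of the two patterns, assuming an ARM target with
NEON (the helper names here are illustrative; the real ones added by this patch
live in vpx_dsp/arm/mem_neon.h):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <string.h>

    /* Risky on unaligned dst: the cast promises 4-byte alignment, so the
     * compiler may encode an alignment hint on the lane store (the clang
     * bug linked above). */
    static inline void store_lane_cast(uint8_t *dst, uint8x8_t a) {
      vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a), 0);
    }

    /* Safe: memcpy keeps the destination typed as bytes, so no alignment
     * is assumed; the fixed-size copy is lowered to a plain store. */
    static inline void store_lane_memcpy(uint8_t *dst, uint8x8_t a) {
      const uint32_t v = vget_lane_u32(vreinterpret_u32_u8(a), 0);
      memcpy(dst, &v, 4);
    }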

--- a/vp8/common/arm/neon/bilinearpredict_neon.c
+++ b/vp8/common/arm/neon/bilinearpredict_neon.c
@@ -11,6 +11,7 @@
 #include <arm_neon.h>
 #include <string.h>
 #include "./vpx_config.h"
+#include "vpx_dsp/arm/mem_neon.h"
 
 static const uint8_t bifilter4_coeff[8][2] = { { 128, 0 }, { 112, 16 },
                                                { 96, 32 }, { 80, 48 },
@@ -21,35 +22,6 @@
   return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
 }
 
-static INLINE void store4x4(unsigned char *dst, int dst_stride,
-                            const uint8x8_t a0, const uint8x8_t a1) {
-  if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) {
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1);
-  } else {
-    // Store to the aligned local buffer and memcpy instead of vget_lane_u8
-    // which is really really slow.
-    uint32_t output_buffer[4];
-    vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0);
-    vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1);
-    vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0);
-    vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1);
-
-    memcpy(dst, output_buffer, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 1, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 2, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 3, 4);
-  }
-}
-
 void vp8_bilinear_predict4x4_neon(unsigned char *src_ptr,
                                   int src_pixels_per_line, int xoffset,
                                   int yoffset, unsigned char *dst_ptr,
@@ -122,7 +94,7 @@
 
   // secondpass_filter
   if (yoffset == 0) {  // skip_2ndpass_filter
-    store4x4(dst_ptr, dst_pitch, e0, e1);
+    store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
   } else {
     uint8x8_t f0, f1;
     const uint8x8_t filter0 = vdup_n_u8(bifilter4_coeff[yoffset][0]);
@@ -140,7 +112,7 @@
     f0 = vqrshrn_n_u16(b0, 7);
     f1 = vqrshrn_n_u16(b1, 7);
 
-    store4x4(dst_ptr, dst_pitch, f0, f1);
+    store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(f0, f1));
   }
 }
 
--- a/vp8/common/arm/neon/sixtappredict_neon.c
+++ b/vp8/common/arm/neon/sixtappredict_neon.c
@@ -11,6 +11,7 @@
 #include <arm_neon.h>
 #include <string.h>
 #include "./vpx_config.h"
+#include "vpx_dsp/arm/mem_neon.h"
 #include "vpx_ports/mem.h"
 
 static const int8_t vp8_sub_pel_filters[8][8] = {
@@ -42,35 +43,6 @@
   return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
 }
 
-static INLINE void store4x4(unsigned char *dst, int dst_stride,
-                            const uint8x8_t a0, const uint8x8_t a1) {
-  if (!((uintptr_t)dst & 0x3) && !(dst_stride & 0x3)) {
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a0), 1);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 0);
-    dst += dst_stride;
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(a1), 1);
-  } else {
-    // Store to the aligned local buffer and memcpy instead of vget_lane_u8
-    // which is really really slow.
-    uint32_t output_buffer[4];
-    vst1_lane_u32(output_buffer, vreinterpret_u32_u8(a0), 0);
-    vst1_lane_u32(output_buffer + 1, vreinterpret_u32_u8(a0), 1);
-    vst1_lane_u32(output_buffer + 2, vreinterpret_u32_u8(a1), 0);
-    vst1_lane_u32(output_buffer + 3, vreinterpret_u32_u8(a1), 1);
-
-    memcpy(dst, output_buffer, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 1, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 2, 4);
-    dst += dst_stride;
-    memcpy(dst, output_buffer + 3, 4);
-  }
-}
-
 static INLINE void filter_add_accumulate(const uint8x16_t a, const uint8x16_t b,
                                          const uint8x8_t filter, uint16x8_t *c,
                                          uint16x8_t *d) {
@@ -180,7 +152,7 @@
   e0 = vqrshrun_n_s16(d0, 7);
   e1 = vqrshrun_n_s16(d1, 7);
 
-  store4x4(dst, dst_stride, e0, e1);
+  store_unaligned_u8q(dst, dst_stride, vcombine_u8(e0, e1));
 }
 
 void vp8_sixtap_predict4x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
@@ -297,7 +269,7 @@
   b2 = vqrshrun_n_s16(e4567, 7);
 
   if (yoffset == 0) {  // firstpass_filter4x4_only
-    store4x4(dst_ptr, dst_pitch, b0, b2);
+    store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(b0, b2));
     return;
   }
 
@@ -411,7 +383,7 @@
   e0 = vqrshrun_n_s16(d0, 7);
   e1 = vqrshrun_n_s16(d1, 7);
 
-  store4x4(dst_ptr, dst_pitch, e0, e1);
+  store_unaligned_u8q(dst_ptr, dst_pitch, vcombine_u8(e0, e1));
 }
 
 void vp8_sixtap_predict8x4_neon(unsigned char *src_ptr, int src_pixels_per_line,
--- a/vpx_dsp/arm/mem_neon.h
+++ b/vpx_dsp/arm/mem_neon.h
@@ -69,6 +69,29 @@
 #endif
 }
 
+// Propagate type information to the compiler. Without this the compiler may
+// assume the required alignment of uint32_t (4 bytes) and add alignment hints
+// to the memory access.
+//
+// This is used for functions operating on uint8_t which wish to load or store 4
+// values at a time but which may not be on 4 byte boundaries.
+static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
+  memcpy(buf, &a, 4);
+}
+
+// Store 4 sets of 4 bytes when alignment is not guaranteed.
+static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
+                                       const uint8x16_t a) {
+  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
+  buf += stride;
+  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
+}
+
 // Load 2 sets of 4 bytes when alignment is guaranteed.
 static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
   uint32x2_t a = vdup_n_u32(0);
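
As a usage note, a minimal sketch that exercises the new helper on a
deliberately misaligned destination; this assumes it is compiled inside the
libvpx tree on an ARM/NEON target so that vpx_dsp/arm/mem_neon.h and the
generated vpx_config.h are available:

    #include <stdio.h>
    #include <string.h>

    #include <arm_neon.h>

    #include "vpx_dsp/arm/mem_neon.h" /* store_unaligned_u8q() */

    int main(void) {
      /* Four 8-byte rows; storing at an offset of one byte makes the
       * destination pointer unaligned even though the stride is a
       * multiple of 4. */
      uint8_t frame[4 * 8];
      uint8_t pixels[16];
      int r, c;

      memset(frame, 0, sizeof(frame));
      for (r = 0; r < 16; ++r) pixels[r] = (uint8_t)(r + 1);

      store_unaligned_u8q(frame + 1, 8, vld1q_u8(pixels));

      for (r = 0; r < 4; ++r) {
        for (c = 0; c < 8; ++c) printf("%4d", frame[r * 8 + c]);
        printf("\n");
      }
      return 0;
    }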