shithub: libvpx

ref: 4808d831dbc4e9ff83fa0efe11207bc135c6d6f5
parent: 231aa6ae32fca53efc45ffd39e14650346fcb030
author: Jonathan Wright <jonathan.wright@arm.com>
date: Wed May 12 12:05:56 EDT 2021

Optimize remaining mse and sse functions in variance_neon.c

Implement the sum-of-squared-differences calculations in
vpx_mse16x16_neon and vpx_get4x4sse_cs_neon using the ABD and UDOT
instructions instead of widening subtracts followed by a sequence of
MLAs (a short sketch of the idea follows the trailers below).

The existing implementations are retained for use on CPUs that do not
implement the Armv8.4-A UDOT instruction. This commit also renames the
variables in those implementations to be more descriptive.

Bug: b/181236880
Change-Id: Id4ad8ea7c808af1ac9bb5f1b63327ab487e4b1c7
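
Why ABD + UDOT is safe here: since (a - b)^2 == |a - b|^2, the
absolute difference fits back into an 8-bit lane, and UDOT then
multiply-accumulates four u8*u8 products into each 32-bit lane, so a
lane grows by at most 4 * 255 * 255 = 260100 per instruction. A
minimal sketch of the idea, not patch code: the helper name and the
use of vaddvq_u32 instead of libvpx's horizontal_add_uint32x4 are
illustrative, and it assumes a compiler targeting Armv8.2-A or later
with dot-product support, e.g. -march=armv8.2-a+dotprod:

#include <arm_neon.h>
#include <stdint.h>

/* Sketch: SSE of one 16-pixel row via ABD + UDOT. */
static uint32_t row_sse_udot(const uint8_t *src, const uint8_t *ref) {
  uint8x16_t a = vld1q_u8(src);
  uint8x16_t b = vld1q_u8(ref);
  /* |a - b| is exact in 8 bits; squaring it equals (a - b)^2. */
  uint8x16_t abs_diff = vabdq_u8(a, b);
  /* Each 32-bit lane accumulates four u8*u8 products. */
  uint32x4_t sse = vdotq_u32(vdupq_n_u32(0), abs_diff, abs_diff);
  return vaddvq_u32(sse); /* horizontal add of the four lanes */
}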

--- a/vpx_dsp/arm/variance_neon.c
+++ b/vpx_dsp/arm/variance_neon.c
@@ -357,117 +357,165 @@
   return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12);
 }
 
+#if defined(__ARM_FEATURE_DOTPROD) && (__ARM_FEATURE_DOTPROD == 1)
+
 unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int src_stride,
                                const unsigned char *ref_ptr, int ref_stride,
                                unsigned int *sse) {
   int i;
-  int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
-  int64x1_t d0s64;
-  uint8x16_t q0u8, q1u8, q2u8, q3u8;
-  int32x4_t q7s32, q8s32, q9s32, q10s32;
-  uint16x8_t q11u16, q12u16, q13u16, q14u16;
-  int64x2_t q1s64;
+  uint8x16_t a[2], b[2], abs_diff[2];
+  uint32x4_t sse_vec[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };
 
-  q7s32 = vdupq_n_s32(0);
-  q8s32 = vdupq_n_s32(0);
-  q9s32 = vdupq_n_s32(0);
-  q10s32 = vdupq_n_s32(0);
-
-  for (i = 0; i < 8; i++) {  // mse16x16_neon_loop
-    q0u8 = vld1q_u8(src_ptr);
+  for (i = 0; i < 8; i++) {
+    a[0] = vld1q_u8(src_ptr);
     src_ptr += src_stride;
-    q1u8 = vld1q_u8(src_ptr);
+    a[1] = vld1q_u8(src_ptr);
     src_ptr += src_stride;
-    q2u8 = vld1q_u8(ref_ptr);
+    b[0] = vld1q_u8(ref_ptr);
     ref_ptr += ref_stride;
-    q3u8 = vld1q_u8(ref_ptr);
+    b[1] = vld1q_u8(ref_ptr);
     ref_ptr += ref_stride;
 
-    q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
-    q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
-    q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
-    q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
+    abs_diff[0] = vabdq_u8(a[0], b[0]);
+    abs_diff[1] = vabdq_u8(a[1], b[1]);
 
-    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
-    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
-    q7s32 = vmlal_s16(q7s32, d22s16, d22s16);
-    q8s32 = vmlal_s16(q8s32, d23s16, d23s16);
+    sse_vec[0] = vdotq_u32(sse_vec[0], abs_diff[0], abs_diff[0]);
+    sse_vec[1] = vdotq_u32(sse_vec[1], abs_diff[1], abs_diff[1]);
+  }
 
-    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
-    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
-    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
-    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+  *sse = horizontal_add_uint32x4(vaddq_u32(sse_vec[0], sse_vec[1]));
+  return horizontal_add_uint32x4(vaddq_u32(sse_vec[0], sse_vec[1]));
+}
 
-    d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
-    d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
-    q7s32 = vmlal_s16(q7s32, d26s16, d26s16);
-    q8s32 = vmlal_s16(q8s32, d27s16, d27s16);
+unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr, int src_stride,
+                                   const unsigned char *ref_ptr,
+                                   int ref_stride) {
+  uint8x8_t a[4], b[4], abs_diff[4];
+  uint32x2_t sse = vdup_n_u32(0);
+
+  a[0] = vld1_u8(src_ptr);
+  src_ptr += src_stride;
+  b[0] = vld1_u8(ref_ptr);
+  ref_ptr += ref_stride;
+  a[1] = vld1_u8(src_ptr);
+  src_ptr += src_stride;
+  b[1] = vld1_u8(ref_ptr);
+  ref_ptr += ref_stride;
+  a[2] = vld1_u8(src_ptr);
+  src_ptr += src_stride;
+  b[2] = vld1_u8(ref_ptr);
+  ref_ptr += ref_stride;
+  a[3] = vld1_u8(src_ptr);
+  b[3] = vld1_u8(ref_ptr);
+
+  abs_diff[0] = vabd_u8(a[0], b[0]);
+  abs_diff[1] = vabd_u8(a[1], b[1]);
+  abs_diff[2] = vabd_u8(a[2], b[2]);
+  abs_diff[3] = vabd_u8(a[3], b[3]);
+
+  sse = vdot_u32(sse, abs_diff[0], abs_diff[0]);
+  sse = vdot_u32(sse, abs_diff[1], abs_diff[1]);
+  sse = vdot_u32(sse, abs_diff[2], abs_diff[2]);
+  sse = vdot_u32(sse, abs_diff[3], abs_diff[3]);
 
-    d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
-    d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
-    q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
-    q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
-  }
+  return vget_lane_u32(sse, 0);
+}
 
-  q7s32 = vaddq_s32(q7s32, q8s32);
-  q9s32 = vaddq_s32(q9s32, q10s32);
-  q10s32 = vaddq_s32(q7s32, q9s32);
+#else
 
-  q1s64 = vpaddlq_s32(q10s32);
-  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int src_stride,
+                               const unsigned char *ref_ptr, int ref_stride,
+                               unsigned int *sse) {
+  int i;
+  uint8x16_t a[2], b[2];
+  int16x4_t diff_lo[4], diff_hi[4];
+  uint16x8_t diff[4];
+  int32x4_t sse_vec[4] = { vdupq_n_s32(0), vdupq_n_s32(0), vdupq_n_s32(0),
+                           vdupq_n_s32(0) };
 
-  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0);
-  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+  for (i = 0; i < 8; i++) {
+    a[0] = vld1q_u8(src_ptr);
+    src_ptr += src_stride;
+    a[1] = vld1q_u8(src_ptr);
+    src_ptr += src_stride;
+    b[0] = vld1q_u8(ref_ptr);
+    ref_ptr += ref_stride;
+    b[1] = vld1q_u8(ref_ptr);
+    ref_ptr += ref_stride;
+
+    diff[0] = vsubl_u8(vget_low_u8(a[0]), vget_low_u8(b[0]));
+    diff[1] = vsubl_u8(vget_high_u8(a[0]), vget_high_u8(b[0]));
+    diff[2] = vsubl_u8(vget_low_u8(a[1]), vget_low_u8(b[1]));
+    diff[3] = vsubl_u8(vget_high_u8(a[1]), vget_high_u8(b[1]));
+
+    diff_lo[0] = vreinterpret_s16_u16(vget_low_u16(diff[0]));
+    diff_lo[1] = vreinterpret_s16_u16(vget_low_u16(diff[1]));
+    sse_vec[0] = vmlal_s16(sse_vec[0], diff_lo[0], diff_lo[0]);
+    sse_vec[1] = vmlal_s16(sse_vec[1], diff_lo[1], diff_lo[1]);
+
+    diff_lo[2] = vreinterpret_s16_u16(vget_low_u16(diff[2]));
+    diff_lo[3] = vreinterpret_s16_u16(vget_low_u16(diff[3]));
+    sse_vec[2] = vmlal_s16(sse_vec[2], diff_lo[2], diff_lo[2]);
+    sse_vec[3] = vmlal_s16(sse_vec[3], diff_lo[3], diff_lo[3]);
+
+    diff_hi[0] = vreinterpret_s16_u16(vget_high_u16(diff[0]));
+    diff_hi[1] = vreinterpret_s16_u16(vget_high_u16(diff[1]));
+    sse_vec[0] = vmlal_s16(sse_vec[0], diff_hi[0], diff_hi[0]);
+    sse_vec[1] = vmlal_s16(sse_vec[1], diff_hi[1], diff_hi[1]);
+
+    diff_hi[2] = vreinterpret_s16_u16(vget_high_u16(diff[2]));
+    diff_hi[3] = vreinterpret_s16_u16(vget_high_u16(diff[3]));
+    sse_vec[2] = vmlal_s16(sse_vec[2], diff_hi[2], diff_hi[2]);
+    sse_vec[3] = vmlal_s16(sse_vec[3], diff_hi[3], diff_hi[3]);
+  }
+
+  sse_vec[0] = vaddq_s32(sse_vec[0], sse_vec[1]);
+  sse_vec[2] = vaddq_s32(sse_vec[2], sse_vec[3]);
+  sse_vec[0] = vaddq_s32(sse_vec[0], sse_vec[2]);
+
+  *sse = horizontal_add_uint32x4(vreinterpretq_u32_s32(sse_vec[0]));
+  return horizontal_add_uint32x4(vreinterpretq_u32_s32(sse_vec[0]));
 }
 
 unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr, int src_stride,
                                    const unsigned char *ref_ptr,
                                    int ref_stride) {
-  int16x4_t d22s16, d24s16, d26s16, d28s16;
-  int64x1_t d0s64;
-  uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
-  int32x4_t q7s32, q8s32, q9s32, q10s32;
-  uint16x8_t q11u16, q12u16, q13u16, q14u16;
-  int64x2_t q1s64;
+  uint8x8_t a[4], b[4];
+  int16x4_t diff_lo[4];
+  uint16x8_t diff[4];
+  int32x4_t sse;
 
-  d0u8 = vld1_u8(src_ptr);
+  a[0] = vld1_u8(src_ptr);
   src_ptr += src_stride;
-  d4u8 = vld1_u8(ref_ptr);
+  b[0] = vld1_u8(ref_ptr);
   ref_ptr += ref_stride;
-  d1u8 = vld1_u8(src_ptr);
+  a[1] = vld1_u8(src_ptr);
   src_ptr += src_stride;
-  d5u8 = vld1_u8(ref_ptr);
+  b[1] = vld1_u8(ref_ptr);
   ref_ptr += ref_stride;
-  d2u8 = vld1_u8(src_ptr);
+  a[2] = vld1_u8(src_ptr);
   src_ptr += src_stride;
-  d6u8 = vld1_u8(ref_ptr);
+  b[2] = vld1_u8(ref_ptr);
   ref_ptr += ref_stride;
-  d3u8 = vld1_u8(src_ptr);
-  src_ptr += src_stride;
-  d7u8 = vld1_u8(ref_ptr);
-  ref_ptr += ref_stride;
+  a[3] = vld1_u8(src_ptr);
+  b[3] = vld1_u8(ref_ptr);
 
-  q11u16 = vsubl_u8(d0u8, d4u8);
-  q12u16 = vsubl_u8(d1u8, d5u8);
-  q13u16 = vsubl_u8(d2u8, d6u8);
-  q14u16 = vsubl_u8(d3u8, d7u8);
+  diff[0] = vsubl_u8(a[0], b[0]);
+  diff[1] = vsubl_u8(a[1], b[1]);
+  diff[2] = vsubl_u8(a[2], b[2]);
+  diff[3] = vsubl_u8(a[3], b[3]);
 
-  d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16));
-  d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16));
-  d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16));
-  d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16));
+  diff_lo[0] = vget_low_s16(vreinterpretq_s16_u16(diff[0]));
+  diff_lo[1] = vget_low_s16(vreinterpretq_s16_u16(diff[1]));
+  diff_lo[2] = vget_low_s16(vreinterpretq_s16_u16(diff[2]));
+  diff_lo[3] = vget_low_s16(vreinterpretq_s16_u16(diff[3]));
 
-  q7s32 = vmull_s16(d22s16, d22s16);
-  q8s32 = vmull_s16(d24s16, d24s16);
-  q9s32 = vmull_s16(d26s16, d26s16);
-  q10s32 = vmull_s16(d28s16, d28s16);
+  sse = vmull_s16(diff_lo[0], diff_lo[0]);
+  sse = vmlal_s16(sse, diff_lo[1], diff_lo[1]);
+  sse = vmlal_s16(sse, diff_lo[2], diff_lo[2]);
+  sse = vmlal_s16(sse, diff_lo[3], diff_lo[3]);
 
-  q7s32 = vaddq_s32(q7s32, q8s32);
-  q9s32 = vaddq_s32(q9s32, q10s32);
-  q9s32 = vaddq_s32(q7s32, q9s32);
-
-  q1s64 = vpaddlq_s32(q9s32);
-  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
-
-  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+  return horizontal_add_uint32x4(vreinterpretq_u32_s32(sse));
 }
+
+#endif
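
For reference, the UDOT path and the widening-subtract path compute
the same quantity. A plain-C equivalent of the vpx_mse16x16_neon sum
of squared differences, as a sketch for clarity rather than part of
the patch (note that vpx_mse16x16 returns the raw SSE and does not
subtract the mean):

#include <stdint.h>

/* Scalar sketch of the 16x16 sum of squared differences. The maximum
   possible result is 256 * 255 * 255 = 16646400, comfortably inside
   32 bits, which is why the NEON accumulators above never overflow. */
static unsigned int mse16x16_ref(const unsigned char *src_ptr,
                                 int src_stride,
                                 const unsigned char *ref_ptr,
                                 int ref_stride, unsigned int *sse) {
  unsigned int acc = 0;
  int i, j;
  for (i = 0; i < 16; i++) {
    for (j = 0; j < 16; j++) {
      const int diff = src_ptr[j] - ref_ptr[j];
      acc += (unsigned int)(diff * diff);
    }
    src_ptr += src_stride;
    ref_ptr += ref_stride;
  }
  *sse = acc;
  return acc;
}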