ref: 77772350f3082470e509ce47418e08af57d5fe28
parent: 08edb85bd0f2b7fb72a1dd5625e94dfe023cb977
author: Luca Barbato <lu_zero@gentoo.org>
date: Sat Apr 29 08:34:20 EDT 2017
ppc: Add convolve8_avg_horiz_vsx

The 8x8 and larger block cases can be sped up further.

Change-Id: I54549b03ac6c7a4e3f485738b100c3cac7ac2e15
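For reference, the averaging variant differs from the plain horizontal filter only in how the result is written back: the filtered pixel is rounded together with the value already in dst instead of being stored directly. A minimal scalar sketch of that behaviour, mirroring what the C reference vpx_convolve8_avg_horiz_c computes (the helper names below are illustrative only, not part of the library):

    #include <stdint.h>

    /* Scalar model of one output pixel: 8-tap horizontal filter
     * (SUBPEL_TAPS == 8, FILTER_BITS == 7 in libvpx), clamped to 8 bits,
     * then averaged with the pixel already present in dst. */
    static uint8_t clamp_u8(int v) {
      return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    static void avg_horiz_pixel(const uint8_t *src_x, const int16_t *filter,
                                uint8_t *dst_px) {
      int k, sum = 0;
      for (k = 0; k < 8; ++k) sum += src_x[k] * filter[k];
      {
        /* ROUND_POWER_OF_TWO(sum, FILTER_BITS) */
        const int filtered = clamp_u8((sum + (1 << 6)) >> 7);
        /* ROUND_POWER_OF_TWO(dst + filtered, 1): average with existing dst */
        *dst_px = (uint8_t)((*dst_px + filtered + 1) >> 1);
      }
    }

The VSX path below computes the same thing one output pixel at a time, doing the multiply-accumulate with vec_msum and the rounding shift with vec_sr, and reuses the plain-horizontal convolve_line helper for the filtering step.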
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -1225,7 +1225,7 @@
#if HAVE_VSX
const ConvolveFunctions convolve8_vsx(
vpx_convolve_copy_vsx, vpx_convolve_avg_vsx, vpx_convolve8_horiz_vsx,
- vpx_convolve8_avg_horiz_c, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
+ vpx_convolve8_avg_horiz_vsx, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
vpx_convolve8_c, vpx_convolve8_avg_c, vpx_scaled_horiz_c,
vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
--- a/vpx_dsp/ppc/vpx_convolve_vsx.c
+++ b/vpx_dsp/ppc/vpx_convolve_vsx.c
@@ -160,11 +160,24 @@
}
}
+static inline void convolve_line(uint8_t *dst, const uint8_t *const src_x,
+ const int16_t *const x_filter) {
+ const int16x8_t s = unpack_to_s16_h(vec_vsx_ld(0, src_x));
+ const int16x8_t f = vec_vsx_ld(0, x_filter);
+ const int32x4_t sum = vec_msum(s, f, vec_splat_s32(0));
+ const int32x4_t bias =
+ vec_sl(vec_splat_s32(1), vec_splat_u32(FILTER_BITS - 1));
+ const int32x4_t avg = vec_sr(vec_sums(sum, bias), vec_splat_u32(FILTER_BITS));
+ const uint8x16_t v = vec_splat(
+ vec_packsu(vec_pack(avg, vec_splat_s32(0)), vec_splat_s16(0)), 3);
+ vec_ste(v, 0, dst);
+}
+
// TODO(lu_zero): Implement 8x8 and bigger block special cases
-static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
- uint8_t *dst, ptrdiff_t dst_stride,
- const InterpKernel *x_filters, int x0_q4,
- int x_step_q4, int w, int h) {
+static inline void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *x_filters, int x0_q4,
+ int x_step_q4, int w, int h) {
int x, y;
src -= SUBPEL_TAPS / 2 - 1;
@@ -171,18 +184,8 @@
for (y = 0; y < h; ++y) {
int x_q4 = x0_q4;
for (x = 0; x < w; ++x) {
- const uint8_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
- const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
- const int16x8_t s = unpack_to_s16_h(vec_vsx_ld(0, src_x));
- const int16x8_t f = vec_vsx_ld(0, x_filter);
- const int32x4_t sum = vec_msum(s, f, vec_splat_s32(0));
- const int32x4_t bias =
- vec_sl(vec_splat_s32(1), vec_splat_u32(FILTER_BITS - 1));
- const int32x4_t avg =
- vec_sr(vec_sums(sum, bias), vec_splat_u32(FILTER_BITS));
- const uint8x16_t v = vec_splat(
- vec_packsu(vec_pack(avg, vec_splat_s32(0)), vec_splat_s16(0)), 3);
- vec_ste(v, 0, dst + x);
+ convolve_line(dst + x, &src[x_q4 >> SUBPEL_BITS],
+ x_filters[x_q4 & SUBPEL_MASK]);
x_q4 += x_step_q4;
}
src += src_stride;
@@ -190,6 +193,27 @@
}
}
+static inline void convolve_avg_horiz(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const InterpKernel *x_filters, int x0_q4,
+ int x_step_q4, int w, int h) {
+ int x, y;
+ src -= SUBPEL_TAPS / 2 - 1;
+
+ for (y = 0; y < h; ++y) {
+ int x_q4 = x0_q4;
+ for (x = 0; x < w; ++x) {
+ uint8_t v;
+ convolve_line(&v, &src[x_q4 >> SUBPEL_BITS],
+ x_filters[x_q4 & SUBPEL_MASK]);
+ dst[x] = ROUND_POWER_OF_TWO(dst[x] + v, 1);
+ x_q4 += x_step_q4;
+ }
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
void vpx_convolve8_horiz_vsx(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
const int16_t *filter_x, int x_step_q4,
@@ -203,4 +227,19 @@
convolve_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4, x_step_q4,
w, h);
+}
+
+void vpx_convolve8_avg_horiz_vsx(const uint8_t *src, ptrdiff_t src_stride,
+ uint8_t *dst, ptrdiff_t dst_stride,
+ const int16_t *filter_x, int x_step_q4,
+ const int16_t *filter_y, int y_step_q4, int w,
+ int h) {
+ const InterpKernel *const filters_x = get_filter_base(filter_x);
+ const int x0_q4 = get_filter_offset(filter_x, filters_x);
+
+ (void)filter_y;
+ (void)y_step_q4;
+
+ convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x, x0_q4,
+ x_step_q4, w, h);
}
--- a/vpx_dsp/vpx_dsp_rtcd_defs.pl
+++ b/vpx_dsp/vpx_dsp_rtcd_defs.pl
@@ -350,7 +350,7 @@
specialize qw/vpx_convolve8_avg sse2 ssse3 neon dspr2 msa/;
add_proto qw/void vpx_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
-specialize qw/vpx_convolve8_avg_horiz sse2 ssse3 neon dspr2 msa/;
+specialize qw/vpx_convolve8_avg_horiz sse2 ssse3 neon dspr2 msa vsx/;
add_proto qw/void vpx_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
specialize qw/vpx_convolve8_avg_vert sse2 ssse3 neon dspr2 msa/;
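The specialize line is consumed by the rtcd generator, so at runtime vpx_convolve8_avg_horiz resolves to the VSX version whenever CPU detection reports VSX. A rough paraphrase of the dispatch shape the generated vpx_dsp_rtcd.h ends up with on ppc (HAS_VSX and ppc_simd_caps() are quoted from memory of vpx_ports/ppc.h, not from this patch; treat this as a sketch rather than the generated code verbatim):

    #include <stddef.h>
    #include <stdint.h>

    typedef void convolve_fn(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int x_step_q4,
                             const int16_t *filter_y, int y_step_q4,
                             int w, int h);

    convolve_fn vpx_convolve8_avg_horiz_c;    /* C reference implementation */
    convolve_fn vpx_convolve8_avg_horiz_vsx;  /* the VSX version added here */
    int ppc_simd_caps(void);                  /* runtime detection, vpx_ports */

    #ifndef HAS_VSX
    #define HAS_VSX 0x01  /* flag value assumed from vpx_ports/ppc.h */
    #endif

    /* The public symbol becomes a function pointer: default to the C
     * version, repoint to the VSX version when VSX is detected. */
    convolve_fn *vpx_convolve8_avg_horiz;

    static void setup_rtcd_internal(void) {
      const int flags = ppc_simd_caps();
      vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_c;
      if (flags & HAS_VSX) vpx_convolve8_avg_horiz = vpx_convolve8_avg_horiz_vsx;
    }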