shithub: libvpx

ref: c6bc7499d9378b79eabf405209f1ad7696328f42
parent: 7eff8f3b1dc25a28c213900d4d6dd8dbad214520
parent: db80c23fd4c25a8b27b2bf3d6b388cf0f8e5c329
author: James Zern <jzern@google.com>
date: Mon Oct 3 18:40:42 EDT 2016

Merge "cosmetics,*_neon.c: rm redundant return from void fns"

--- a/vp9/common/arm/neon/vp9_iht4x4_add_neon.c
+++ b/vp9/common/arm/neon/vp9_iht4x4_add_neon.c
@@ -37,7 +37,6 @@
 
   *q8s16 = vreinterpretq_s16_s32(q0x2s32.val[0]);
   *q9s16 = vreinterpretq_s16_s32(q0x2s32.val[1]);
-  return;
 }
 
 static INLINE void GENERATE_COSINE_CONSTANTS(int16x4_t *d0s16, int16x4_t *d1s16,
@@ -45,7 +44,6 @@
   *d0s16 = vdup_n_s16(cospi_8_64);
   *d1s16 = vdup_n_s16(cospi_16_64);
   *d2s16 = vdup_n_s16(cospi_24_64);
-  return;
 }
 
 static INLINE void GENERATE_SINE_CONSTANTS(int16x4_t *d3s16, int16x4_t *d4s16,
@@ -54,7 +52,6 @@
   *d4s16 = vdup_n_s16(sinpi_2_9);
   *q3s16 = vdupq_n_s16(sinpi_3_9);
   *d5s16 = vdup_n_s16(sinpi_4_9);
-  return;
 }
 
 static INLINE void IDCT4x4_1D(int16x4_t *d0s16, int16x4_t *d1s16,
@@ -90,7 +87,6 @@
   *q8s16 = vaddq_s16(q13s16, q14s16);
   *q9s16 = vsubq_s16(q13s16, q14s16);
   *q9s16 = vcombine_s16(vget_high_s16(*q9s16), vget_low_s16(*q9s16));  // vswp
-  return;
 }
 
 static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16,
@@ -136,7 +132,6 @@
 
   *q8s16 = vcombine_s16(d16s16, d17s16);
   *q9s16 = vcombine_s16(d18s16, d19s16);
-  return;
 }
 
 void vp9_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -158,7 +153,6 @@
     case 0:  // idct_idct is not supported. Fall back to C
       vp9_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
       return;
-      break;
     case 1:  // iadst_idct
       // generate constants
       GENERATE_COSINE_CONSTANTS(&d0s16, &d1s16, &d2s16);
@@ -229,5 +223,4 @@
   vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 1);
   dest -= dest_stride;
   vst1_lane_u32((uint32_t *)dest, vreinterpret_u32_u8(d26u8), 0);
-  return;
 }
--- a/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
+++ b/vp9/common/arm/neon/vp9_iht8x8_add_neon.c
@@ -180,7 +180,6 @@
   *q13s16 = vsubq_s16(q2s16, q5s16);
   *q14s16 = vsubq_s16(q1s16, q6s16);
   *q15s16 = vsubq_s16(q0s16, q7s16);
-  return;
 }
 
 static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
@@ -405,7 +404,6 @@
   *q11s16 = vsubq_s16(q5s16, q2s16);
   *q13s16 = vsubq_s16(q5s16, q6s16);
   *q15s16 = vsubq_s16(q5s16, q4s16);
-  return;
 }
 
 void vp9_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -433,7 +431,6 @@
     case 0:  // idct_idct is not supported. Fall back to C
       vp9_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
       return;
-      break;
     case 1:  // iadst_idct
       // generate IDCT constants
       // GENERATE_IDCT_CONSTANTS
@@ -540,5 +537,4 @@
     vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
     d2 += dest_stride;
   }
-  return;
 }
--- a/vpx_dsp/arm/idct16x16_1_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_1_add_neon.c
@@ -56,5 +56,4 @@
       d2 += dest_stride;
     }
   }
-  return;
 }
--- a/vpx_dsp/arm/idct16x16_add_neon.c
+++ b/vpx_dsp/arm/idct16x16_add_neon.c
@@ -247,7 +247,6 @@
   vst1_u64((uint64_t *)out, d30u64);
   out += output_stride;
   vst1_u64((uint64_t *)out, d31u64);
-  return;
 }
 
 void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
@@ -796,7 +795,6 @@
     out += 4;
     vst1_u64((uint64_t *)out, vreinterpret_u64_s16(vget_high_s16(q15s16)));
   }
-  return;
 }
 
 void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
@@ -932,7 +930,6 @@
   vst1_u64((uint64_t *)out, d30u64);
   out += output_stride;
   vst1_u64((uint64_t *)out, d31u64);
-  return;
 }
 
 void vpx_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
@@ -1227,5 +1224,4 @@
   vst1_u64((uint64_t *)out, d30u64);
   out += 4;
   vst1_u64((uint64_t *)out, d31u64);
-  return;
 }
--- a/vpx_dsp/arm/idct16x16_neon.c
+++ b/vpx_dsp/arm/idct16x16_neon.c
@@ -90,8 +90,6 @@
   // restore d8-d15 register values.
   vpx_pop_neon(store_reg);
 #endif
-
-  return;
 }
 
 void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest,
@@ -147,6 +145,4 @@
   // restore d8-d15 register values.
   vpx_pop_neon(store_reg);
 #endif
-
-  return;
 }
--- a/vpx_dsp/arm/idct32x32_1_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_1_add_neon.c
@@ -35,7 +35,6 @@
   *q14u8 = vld1q_u8(d);
   d += d_stride;
   *q15u8 = vld1q_u8(d);
-  return;
 }
 
 static INLINE void ADD_DIFF_16x8(uint8x16_t qdiffu8, uint8x16_t *q8u8,
@@ -51,7 +50,6 @@
   *q13u8 = vqaddq_u8(*q13u8, qdiffu8);
   *q14u8 = vqaddq_u8(*q14u8, qdiffu8);
   *q15u8 = vqaddq_u8(*q15u8, qdiffu8);
-  return;
 }
 
 static INLINE void SUB_DIFF_16x8(uint8x16_t qdiffu8, uint8x16_t *q8u8,
@@ -67,7 +65,6 @@
   *q13u8 = vqsubq_u8(*q13u8, qdiffu8);
   *q14u8 = vqsubq_u8(*q14u8, qdiffu8);
   *q15u8 = vqsubq_u8(*q15u8, qdiffu8);
-  return;
 }
 
 static INLINE void ST_16x8(uint8_t *d, int d_stride, uint8x16_t *q8u8,
@@ -90,7 +87,6 @@
   vst1q_u8(d, *q14u8);
   d += d_stride;
   vst1q_u8(d, *q15u8);
-  return;
 }
 
 void vpx_idct32x32_1_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -137,5 +133,4 @@
       }
     }
   }
-  return;
 }
--- a/vpx_dsp/arm/idct32x32_add_neon.c
+++ b/vpx_dsp/arm/idct32x32_add_neon.c
@@ -68,7 +68,6 @@
   p2 += stride;
   vst1_s16((int16_t *)p1, d8s16);
   vst1_s16((int16_t *)p2, d11s16);
-  return;
 }
 
 #define STORE_COMBINE_EXTREME_RESULTS(r7, r6) \
@@ -112,7 +111,6 @@
   p2 += stride;
   vst1_s16((int16_t *)p2, d7s16);
   vst1_s16((int16_t *)p1, d4s16);
-  return;
 }
 
 #define DO_BUTTERFLY_STD(const_1, const_2, qA, qB) \
@@ -150,7 +148,6 @@
 
   *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14), vqrshrn_n_s32(q9s32, 14));
   *qBs16 = vcombine_s16(vqrshrn_n_s32(q11s32, 14), vqrshrn_n_s32(q10s32, 14));
-  return;
 }
 
 static INLINE void idct32_transpose_pair(const int16_t *input, int16_t *t_buf) {
@@ -197,7 +194,6 @@
     vst1q_s16(t_buf, q15s16);
     t_buf += 8;
   }
-  return;
 }
 
 static INLINE void idct32_bands_end_1st_pass(int16_t *out, int16x8_t q2s16,
@@ -285,7 +281,6 @@
   q7s16 = vsubq_s16(q2s16, q1s16);
   STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16);
   STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16);
-  return;
 }
 
 static INLINE void idct32_bands_end_2nd_pass(
@@ -380,7 +375,6 @@
   q6s16 = vsubq_s16(q3s16, q0s16);
   q7s16 = vsubq_s16(q2s16, q1s16);
   STORE_COMBINE_EXTREME_RESULTS(r7, r6);
-  return;
 }
 
 void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -638,5 +632,4 @@
       }
     }
   }
-  return;
 }
--- a/vpx_dsp/arm/idct4x4_1_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_1_add_neon.c
@@ -44,5 +44,4 @@
     vst1_lane_u32((uint32_t *)d2, vreinterpret_u32_u8(d6u8), 1);
     d2 += dest_stride;
   }
-  return;
 }
--- a/vpx_dsp/arm/idct4x4_add_neon.c
+++ b/vpx_dsp/arm/idct4x4_add_neon.c
@@ -145,5 +145,4 @@
   vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 1);
   d += dest_stride;
   vst1_lane_u32((uint32_t *)d, vreinterpret_u32_u8(d27u8), 0);
-  return;
 }
--- a/vpx_dsp/arm/idct8x8_1_add_neon.c
+++ b/vpx_dsp/arm/idct8x8_1_add_neon.c
@@ -59,5 +59,4 @@
     vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d31u8));
     d2 += dest_stride;
   }
-  return;
 }
--- a/vpx_dsp/arm/idct8x8_add_neon.c
+++ b/vpx_dsp/arm/idct8x8_add_neon.c
@@ -163,7 +163,6 @@
   *q13s16 = vsubq_s16(q2s16, q5s16);
   *q14s16 = vsubq_s16(q1s16, q6s16);
   *q15s16 = vsubq_s16(q0s16, q7s16);
-  return;
 }
 
 void vpx_idct8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -266,7 +265,6 @@
   d2 += dest_stride;
   vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
   d2 += dest_stride;
-  return;
 }
 
 void vpx_idct8x8_12_add_neon(const tran_low_t *input, uint8_t *dest,
@@ -444,5 +442,4 @@
   d2 += dest_stride;
   vst1_u64((uint64_t *)d2, vreinterpret_u64_u8(d3u8));
   d2 += dest_stride;
-  return;
 }