shithub: libvpx

ref: 691585f6b8ef361e22b119b696e092853e3ace8c
parent: 10bab1ec2966e0c22e80965b00b2a953a338880b
parent: d586cdb4d471c37c733a6ccb9abe25678e4a6385
author: James Zern <jzern@google.com>
date: Fri Sep 22 03:35:55 EDT 2017

Merge changes If59743aa,Ib046fe28,Ia2345752

* changes:
  Remove the unnecessary cast of (int16_t)cospi_{1...31}_64
  Remove the unnecessary upcasts of (int)cospi_{1...31}_64
  Change cospi_{1...31}_64 from tran_high_t to tran_coef_t
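
For context (this note and sketch are not part of the patch): a minimal standalone
illustration of why the (int16_t) casts at intrinsic call sites become unnecessary
once the cospi constants are declared as tran_coef_t. It assumes tran_high_t is
int64_t and tran_coef_t is int16_t, as the TODO comment removed from
vpx_dsp/arm/fdct32x32_neon.c suggests ("aka const long long", "the constants fit in
int16_t"); the names old_cospi_16_64, new_cospi_16_64, and take_int16 are
illustrative only.

    #include <stdint.h>

    typedef int64_t tran_high_t; /* assumed, per the removed TODO comment */
    typedef int16_t tran_coef_t; /* assumed: the width the constants need */

    static const tran_high_t old_cospi_16_64 = 11585; /* before this change */
    static const tran_coef_t new_cospi_16_64 = 11585; /* after this change  */

    /* Stands in for a 16-bit-taking intrinsic such as _mm_set1_epi16 or
     * vmull_n_s16, which is where the casts used to appear. */
    static int16_t take_int16(int16_t c) { return c; }

    int main(void) {
      /* With a 64-bit constant, call sites needed an explicit narrowing cast. */
      int16_t a = take_int16((int16_t)old_cospi_16_64);
      /* With tran_coef_t the value is already 16 bits, so the cast goes away. */
      int16_t b = take_int16(new_cospi_16_64);
      /* Where a 64-bit product is still required (the portable C paths), the
       * operand is now upcast explicitly instead of relying on the constant's
       * former 64-bit type. */
      tran_high_t product = (tran_high_t)new_cospi_16_64 * 12345;
      return (a == b && product == (tran_high_t)11585 * 12345) ? 0 : 1; /* 0 */
    }

Accordingly, the x86 and NEON hunks below simply drop the casts at the intrinsic
call sites, while the portable C in vpx_dsp/inv_txfm.c and the highbd NEON DC-only
paths add (tran_high_t) upcasts so the multiplies still happen at 64 bits.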

--- a/vp9/encoder/x86/vp9_dct_intrin_sse2.c
+++ b/vp9/encoder/x86/vp9_dct_intrin_sse2.c
@@ -72,7 +72,7 @@
 }
 
 static void fdct4_sse2(__m128i *in) {
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -194,7 +194,7 @@
   //    When we use them, in one case, they are all the same. In all others
   //    it's a pair of them that we need to repeat four times. This is done
   //    by constructing the 32 bit constant corresponding to that pair.
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
   const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
@@ -709,7 +709,7 @@
 
 static void fdct8_sse2(__m128i *in) {
   // constants
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
   const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
@@ -861,7 +861,7 @@
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
   const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__const_0 = _mm_set1_epi16(0);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
 
@@ -1142,7 +1142,7 @@
 static void fdct16_8col(__m128i *in) {
   // perform 16x16 1-D DCT for 8 columns
   __m128i i[8], s[8], p[8], t[8], u[16], v[16];
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
   const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
@@ -1489,8 +1489,8 @@
   const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
   const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
-  const __m128i k__cospi_m16_m16 = _mm_set1_epi16((int16_t)-cospi_16_64);
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
--- a/vp9/encoder/x86/vp9_dct_ssse3.c
+++ b/vp9/encoder/x86/vp9_dct_ssse3.c
@@ -31,7 +31,7 @@
   //    it's a pair of them that we need to repeat four times. This is done
   //    by constructing the 32 bit constant corresponding to that pair.
   const __m128i k__dual_p16_p16 = dual_set_epi16(23170, 23170);
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
   const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
--- a/vpx_dsp/arm/fdct16x16_neon.c
+++ b/vpx_dsp/arm/fdct16x16_neon.c
@@ -169,8 +169,8 @@
 
 // fdct_round_shift(a * c0 +/- b * c1)
 static INLINE void butterfly_two_coeff(const int16x8_t a, const int16x8_t b,
-                                       const tran_high_t c0,
-                                       const tran_high_t c1, int16x8_t *add,
+                                       const tran_coef_t c0,
+                                       const tran_coef_t c1, int16x8_t *add,
                                        int16x8_t *sub) {
   const int32x4_t a0 = vmull_n_s16(vget_low_s16(a), c0);
   const int32x4_t a1 = vmull_n_s16(vget_high_s16(a), c0);
--- a/vpx_dsp/arm/fdct32x32_neon.c
+++ b/vpx_dsp/arm/fdct32x32_neon.c
@@ -214,8 +214,8 @@
 
 // fdct_round_shift(a * c0 +/- b * c1)
 static INLINE void butterfly_two_coeff(const int16x8_t a, const int16x8_t b,
-                                       const tran_high_t constant0,
-                                       const tran_high_t constant1,
+                                       const tran_coef_t constant0,
+                                       const tran_coef_t constant1,
                                        int16x8_t *add, int16x8_t *sub) {
   const int32x4_t a0 = vmull_n_s16(vget_low_s16(a), constant0);
   const int32x4_t a1 = vmull_n_s16(vget_high_s16(a), constant0);
@@ -590,19 +590,14 @@
 // Like butterfly_one_coeff, but with s32.
 static INLINE void butterfly_one_coeff_s32(
     const int32x4_t a_lo, const int32x4_t a_hi, const int32x4_t b_lo,
-    const int32x4_t b_hi, const tran_high_t constant, int32x4_t *add_lo,
+    const int32x4_t b_hi, const int32_t constant, int32x4_t *add_lo,
     int32x4_t *add_hi, int32x4_t *sub_lo, int32x4_t *sub_hi) {
-  // TODO(johannkoenig): Strangely there is only a conversion warning on int64_t
-  // to int32_t (const tran_high_t (aka const long long)) but not for int64_t to
-  // int16_t. The constants fit in int16_t. Investigate using int16_t for the
-  // constants to avoid bouncing between types.
-  const int32_t constant_s32 = (int32_t)constant;
-  const int32x4_t a_lo_0 = vmulq_n_s32(a_lo, constant_s32);
-  const int32x4_t a_hi_0 = vmulq_n_s32(a_hi, constant_s32);
-  const int32x4_t sum0 = vmlaq_n_s32(a_lo_0, b_lo, constant_s32);
-  const int32x4_t sum1 = vmlaq_n_s32(a_hi_0, b_hi, constant_s32);
-  const int32x4_t diff0 = vmlsq_n_s32(a_lo_0, b_lo, constant_s32);
-  const int32x4_t diff1 = vmlsq_n_s32(a_hi_0, b_hi, constant_s32);
+  const int32x4_t a_lo_0 = vmulq_n_s32(a_lo, constant);
+  const int32x4_t a_hi_0 = vmulq_n_s32(a_hi, constant);
+  const int32x4_t sum0 = vmlaq_n_s32(a_lo_0, b_lo, constant);
+  const int32x4_t sum1 = vmlaq_n_s32(a_hi_0, b_hi, constant);
+  const int32x4_t diff0 = vmlsq_n_s32(a_lo_0, b_lo, constant);
+  const int32x4_t diff1 = vmlsq_n_s32(a_hi_0, b_hi, constant);
   *add_lo = vrshrq_n_s32(sum0, DCT_CONST_BITS);
   *add_hi = vrshrq_n_s32(sum1, DCT_CONST_BITS);
   *sub_lo = vrshrq_n_s32(diff0, DCT_CONST_BITS);
@@ -621,19 +616,17 @@
 // Like butterfly_two_coeff, but with s32.
 static INLINE void butterfly_two_coeff_s32(
     const int32x4_t a_lo, const int32x4_t a_hi, const int32x4_t b_lo,
-    const int32x4_t b_hi, const tran_high_t constant0,
-    const tran_high_t constant1, int32x4_t *add_lo, int32x4_t *add_hi,
-    int32x4_t *sub_lo, int32x4_t *sub_hi) {
-  const int32_t constant0_s32 = (int32_t)constant0;
-  const int32_t constant1_s32 = (int32_t)constant1;
-  const int32x4_t a0 = vmulq_n_s32(a_lo, constant0_s32);
-  const int32x4_t a1 = vmulq_n_s32(a_hi, constant0_s32);
-  const int32x4_t a2 = vmulq_n_s32(a_lo, constant1_s32);
-  const int32x4_t a3 = vmulq_n_s32(a_hi, constant1_s32);
-  const int32x4_t sum0 = vmlaq_n_s32(a2, b_lo, constant0_s32);
-  const int32x4_t sum1 = vmlaq_n_s32(a3, b_hi, constant0_s32);
-  const int32x4_t diff0 = vmlsq_n_s32(a0, b_lo, constant1_s32);
-  const int32x4_t diff1 = vmlsq_n_s32(a1, b_hi, constant1_s32);
+    const int32x4_t b_hi, const int32_t constant0, const int32_t constant1,
+    int32x4_t *add_lo, int32x4_t *add_hi, int32x4_t *sub_lo,
+    int32x4_t *sub_hi) {
+  const int32x4_t a0 = vmulq_n_s32(a_lo, constant0);
+  const int32x4_t a1 = vmulq_n_s32(a_hi, constant0);
+  const int32x4_t a2 = vmulq_n_s32(a_lo, constant1);
+  const int32x4_t a3 = vmulq_n_s32(a_hi, constant1);
+  const int32x4_t sum0 = vmlaq_n_s32(a2, b_lo, constant0);
+  const int32x4_t sum1 = vmlaq_n_s32(a3, b_hi, constant0);
+  const int32x4_t diff0 = vmlsq_n_s32(a0, b_lo, constant1);
+  const int32x4_t diff1 = vmlsq_n_s32(a1, b_hi, constant1);
   *add_lo = vrshrq_n_s32(sum0, DCT_CONST_BITS);
   *add_hi = vrshrq_n_s32(sum1, DCT_CONST_BITS);
   *sub_lo = vrshrq_n_s32(diff0, DCT_CONST_BITS);
--- a/vpx_dsp/arm/fdct_neon.c
+++ b/vpx_dsp/arm/fdct_neon.c
@@ -50,8 +50,8 @@
     // Must expand all elements to s32. See 'needs32' comment in fwd_txfm.c.
     const int32x4_t s_0_p_s_1 = vaddl_s16(s_0, s_1);
     const int32x4_t s_0_m_s_1 = vsubl_s16(s_0, s_1);
-    const int32x4_t temp1 = vmulq_n_s32(s_0_p_s_1, (int16_t)cospi_16_64);
-    const int32x4_t temp2 = vmulq_n_s32(s_0_m_s_1, (int16_t)cospi_16_64);
+    const int32x4_t temp1 = vmulq_n_s32(s_0_p_s_1, cospi_16_64);
+    const int32x4_t temp2 = vmulq_n_s32(s_0_m_s_1, cospi_16_64);
 
     // fdct_round_shift
     int16x4_t out_0 = vrshrn_n_s32(temp1, DCT_CONST_BITS);
@@ -59,13 +59,11 @@
 
     // s_3 * cospi_8_64 + s_2 * cospi_24_64
     // s_3 * cospi_24_64 - s_2 * cospi_8_64
-    const int32x4_t s_3_cospi_8_64 = vmull_n_s16(s_3, (int16_t)cospi_8_64);
-    const int32x4_t s_3_cospi_24_64 = vmull_n_s16(s_3, (int16_t)cospi_24_64);
+    const int32x4_t s_3_cospi_8_64 = vmull_n_s16(s_3, cospi_8_64);
+    const int32x4_t s_3_cospi_24_64 = vmull_n_s16(s_3, cospi_24_64);
 
-    const int32x4_t temp3 =
-        vmlal_n_s16(s_3_cospi_8_64, s_2, (int16_t)cospi_24_64);
-    const int32x4_t temp4 =
-        vmlsl_n_s16(s_3_cospi_24_64, s_2, (int16_t)cospi_8_64);
+    const int32x4_t temp3 = vmlal_n_s16(s_3_cospi_8_64, s_2, cospi_24_64);
+    const int32x4_t temp4 = vmlsl_n_s16(s_3_cospi_24_64, s_2, cospi_8_64);
 
     // fdct_round_shift
     int16x4_t out_1 = vrshrn_n_s32(temp3, DCT_CONST_BITS);
--- a/vpx_dsp/arm/fwd_txfm_neon.c
+++ b/vpx_dsp/arm/fwd_txfm_neon.c
@@ -48,18 +48,18 @@
     int32x4_t v_t0_hi = vaddl_s16(vget_high_s16(v_x0), vget_high_s16(v_x1));
     int32x4_t v_t1_lo = vsubl_s16(vget_low_s16(v_x0), vget_low_s16(v_x1));
     int32x4_t v_t1_hi = vsubl_s16(vget_high_s16(v_x0), vget_high_s16(v_x1));
-    int32x4_t v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_24_64);
-    int32x4_t v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_24_64);
-    int32x4_t v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_24_64);
-    int32x4_t v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_24_64);
-    v_t2_lo = vmlal_n_s16(v_t2_lo, vget_low_s16(v_x3), (int16_t)cospi_8_64);
-    v_t2_hi = vmlal_n_s16(v_t2_hi, vget_high_s16(v_x3), (int16_t)cospi_8_64);
-    v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x2), (int16_t)cospi_8_64);
-    v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x2), (int16_t)cospi_8_64);
-    v_t0_lo = vmulq_n_s32(v_t0_lo, (int32_t)cospi_16_64);
-    v_t0_hi = vmulq_n_s32(v_t0_hi, (int32_t)cospi_16_64);
-    v_t1_lo = vmulq_n_s32(v_t1_lo, (int32_t)cospi_16_64);
-    v_t1_hi = vmulq_n_s32(v_t1_hi, (int32_t)cospi_16_64);
+    int32x4_t v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), cospi_24_64);
+    int32x4_t v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), cospi_24_64);
+    int32x4_t v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), cospi_24_64);
+    int32x4_t v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), cospi_24_64);
+    v_t2_lo = vmlal_n_s16(v_t2_lo, vget_low_s16(v_x3), cospi_8_64);
+    v_t2_hi = vmlal_n_s16(v_t2_hi, vget_high_s16(v_x3), cospi_8_64);
+    v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x2), cospi_8_64);
+    v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x2), cospi_8_64);
+    v_t0_lo = vmulq_n_s32(v_t0_lo, cospi_16_64);
+    v_t0_hi = vmulq_n_s32(v_t0_hi, cospi_16_64);
+    v_t1_lo = vmulq_n_s32(v_t1_lo, cospi_16_64);
+    v_t1_hi = vmulq_n_s32(v_t1_hi, cospi_16_64);
     {
       const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
       const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
@@ -77,10 +77,10 @@
     // Stage 2
     v_x0 = vsubq_s16(v_s6, v_s5);
     v_x1 = vaddq_s16(v_s6, v_s5);
-    v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), (int16_t)cospi_16_64);
-    v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), (int16_t)cospi_16_64);
-    v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_16_64);
-    v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_16_64);
+    v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), cospi_16_64);
+    v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), cospi_16_64);
+    v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), cospi_16_64);
+    v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), cospi_16_64);
     {
       const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
       const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
@@ -95,22 +95,22 @@
       v_x3 = vaddq_s16(v_s7, cd);
     }
     // Stage 4
-    v_t0_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_4_64);
-    v_t0_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_4_64);
-    v_t0_lo = vmlal_n_s16(v_t0_lo, vget_low_s16(v_x0), (int16_t)cospi_28_64);
-    v_t0_hi = vmlal_n_s16(v_t0_hi, vget_high_s16(v_x0), (int16_t)cospi_28_64);
-    v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_12_64);
-    v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_12_64);
-    v_t1_lo = vmlal_n_s16(v_t1_lo, vget_low_s16(v_x2), (int16_t)cospi_20_64);
-    v_t1_hi = vmlal_n_s16(v_t1_hi, vget_high_s16(v_x2), (int16_t)cospi_20_64);
-    v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_12_64);
-    v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_12_64);
-    v_t2_lo = vmlsl_n_s16(v_t2_lo, vget_low_s16(v_x1), (int16_t)cospi_20_64);
-    v_t2_hi = vmlsl_n_s16(v_t2_hi, vget_high_s16(v_x1), (int16_t)cospi_20_64);
-    v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_28_64);
-    v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_28_64);
-    v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x0), (int16_t)cospi_4_64);
-    v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x0), (int16_t)cospi_4_64);
+    v_t0_lo = vmull_n_s16(vget_low_s16(v_x3), cospi_4_64);
+    v_t0_hi = vmull_n_s16(vget_high_s16(v_x3), cospi_4_64);
+    v_t0_lo = vmlal_n_s16(v_t0_lo, vget_low_s16(v_x0), cospi_28_64);
+    v_t0_hi = vmlal_n_s16(v_t0_hi, vget_high_s16(v_x0), cospi_28_64);
+    v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), cospi_12_64);
+    v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), cospi_12_64);
+    v_t1_lo = vmlal_n_s16(v_t1_lo, vget_low_s16(v_x2), cospi_20_64);
+    v_t1_hi = vmlal_n_s16(v_t1_hi, vget_high_s16(v_x2), cospi_20_64);
+    v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), cospi_12_64);
+    v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), cospi_12_64);
+    v_t2_lo = vmlsl_n_s16(v_t2_lo, vget_low_s16(v_x1), cospi_20_64);
+    v_t2_hi = vmlsl_n_s16(v_t2_hi, vget_high_s16(v_x1), cospi_20_64);
+    v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), cospi_28_64);
+    v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), cospi_28_64);
+    v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x0), cospi_4_64);
+    v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x0), cospi_4_64);
     {
       const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
       const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
--- a/vpx_dsp/arm/highbd_idct16x16_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct16x16_add_neon.c
@@ -1410,10 +1410,10 @@
 
 void vpx_highbd_idct16x16_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
-  const tran_low_t out0 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  const tran_low_t out1 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
+  const tran_low_t out0 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
+  const tran_low_t out1 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(out0 * (tran_high_t)cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 6);
   const int16x8_t dc = vdupq_n_s16(a1);
   int i;
--- a/vpx_dsp/arm/highbd_idct32x32_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct32x32_add_neon.c
@@ -61,10 +61,10 @@
 
 void vpx_highbd_idct32x32_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                      int stride, int bd) {
-  const tran_low_t out0 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  const tran_low_t out1 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
+  const tran_low_t out0 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
+  const tran_low_t out1 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(out0 * (tran_high_t)cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 6);
   const int16x8_t dc = vdupq_n_s16(a1);
   int i;
--- a/vpx_dsp/arm/highbd_idct4x4_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct4x4_add_neon.c
@@ -54,10 +54,10 @@
 void vpx_highbd_idct4x4_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
   const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
-  const tran_low_t out0 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  const tran_low_t out1 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
+  const tran_low_t out0 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
+  const tran_low_t out1 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(out0 * (tran_high_t)cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 4);
   const int16x8_t dc = vdupq_n_s16(a1);
 
--- a/vpx_dsp/arm/highbd_idct8x8_add_neon.c
+++ b/vpx_dsp/arm/highbd_idct8x8_add_neon.c
@@ -38,10 +38,10 @@
 
 void vpx_highbd_idct8x8_1_add_neon(const tran_low_t *input, uint16_t *dest,
                                    int stride, int bd) {
-  const tran_low_t out0 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  const tran_low_t out1 =
-      HIGHBD_WRAPLOW(dct_const_round_shift(out0 * cospi_16_64), bd);
+  const tran_low_t out0 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
+  const tran_low_t out1 = HIGHBD_WRAPLOW(
+      dct_const_round_shift(out0 * (tran_high_t)cospi_16_64), bd);
   const int16_t a1 = ROUND_POWER_OF_TWO(out1, 5);
   const int16x8_t dc = vdupq_n_s16(a1);
 
--- a/vpx_dsp/inv_txfm.c
+++ b/vpx_dsp/inv_txfm.c
@@ -1429,12 +1429,14 @@
   }
 
   // stage 1
-  temp1 = (input[0] + input[2]) * cospi_16_64;
-  temp2 = (input[0] - input[2]) * cospi_16_64;
+  temp1 = (input[0] + input[2]) * (tran_high_t)cospi_16_64;
+  temp2 = (input[0] - input[2]) * (tran_high_t)cospi_16_64;
   step[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64;
-  temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64;
+  temp1 =
+      input[1] * (tran_high_t)cospi_24_64 - input[3] * (tran_high_t)cospi_8_64;
+  temp2 =
+      input[1] * (tran_high_t)cospi_8_64 + input[3] * (tran_high_t)cospi_24_64;
   step[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -1474,10 +1476,11 @@
                                 int stride, int bd) {
   int i;
   tran_high_t a1;
-  tran_low_t out =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+  tran_low_t out = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
 
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+  out =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 4);
 
   for (i = 0; i < 4; i++) {
@@ -1515,14 +1518,14 @@
   }
 
   // stage 1
-  s0 = cospi_2_64 * x0 + cospi_30_64 * x1;
-  s1 = cospi_30_64 * x0 - cospi_2_64 * x1;
-  s2 = cospi_10_64 * x2 + cospi_22_64 * x3;
-  s3 = cospi_22_64 * x2 - cospi_10_64 * x3;
-  s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
-  s5 = cospi_14_64 * x4 - cospi_18_64 * x5;
-  s6 = cospi_26_64 * x6 + cospi_6_64 * x7;
-  s7 = cospi_6_64 * x6 - cospi_26_64 * x7;
+  s0 = (tran_high_t)cospi_2_64 * x0 + (tran_high_t)cospi_30_64 * x1;
+  s1 = (tran_high_t)cospi_30_64 * x0 - (tran_high_t)cospi_2_64 * x1;
+  s2 = (tran_high_t)cospi_10_64 * x2 + (tran_high_t)cospi_22_64 * x3;
+  s3 = (tran_high_t)cospi_22_64 * x2 - (tran_high_t)cospi_10_64 * x3;
+  s4 = (tran_high_t)cospi_18_64 * x4 + (tran_high_t)cospi_14_64 * x5;
+  s5 = (tran_high_t)cospi_14_64 * x4 - (tran_high_t)cospi_18_64 * x5;
+  s6 = (tran_high_t)cospi_26_64 * x6 + (tran_high_t)cospi_6_64 * x7;
+  s7 = (tran_high_t)cospi_6_64 * x6 - (tran_high_t)cospi_26_64 * x7;
 
   x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s4), bd);
   x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s5), bd);
@@ -1538,10 +1541,10 @@
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
-  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;
-  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;
-  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;
+  s4 = (tran_high_t)cospi_8_64 * x4 + (tran_high_t)cospi_24_64 * x5;
+  s5 = (tran_high_t)cospi_24_64 * x4 - (tran_high_t)cospi_8_64 * x5;
+  s6 = (tran_high_t)(-cospi_24_64) * x6 + (tran_high_t)cospi_8_64 * x7;
+  s7 = (tran_high_t)cospi_8_64 * x6 + (tran_high_t)cospi_24_64 * x7;
 
   x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
   x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
@@ -1553,10 +1556,10 @@
   x7 = HIGHBD_WRAPLOW(dct_const_round_shift(s5 - s7), bd);
 
   // stage 3
-  s2 = cospi_16_64 * (x2 + x3);
-  s3 = cospi_16_64 * (x2 - x3);
-  s6 = cospi_16_64 * (x6 + x7);
-  s7 = cospi_16_64 * (x6 - x7);
+  s2 = (tran_high_t)cospi_16_64 * (x2 + x3);
+  s3 = (tran_high_t)cospi_16_64 * (x2 - x3);
+  s6 = (tran_high_t)cospi_16_64 * (x6 + x7);
+  s7 = (tran_high_t)cospi_16_64 * (x6 - x7);
 
   x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
   x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
@@ -1590,12 +1593,16 @@
   step1[2] = input[4];
   step1[1] = input[2];
   step1[3] = input[6];
-  temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
-  temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
+  temp1 =
+      input[1] * (tran_high_t)cospi_28_64 - input[7] * (tran_high_t)cospi_4_64;
+  temp2 =
+      input[1] * (tran_high_t)cospi_4_64 + input[7] * (tran_high_t)cospi_28_64;
   step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;
-  temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;
+  temp1 =
+      input[5] * (tran_high_t)cospi_12_64 - input[3] * (tran_high_t)cospi_20_64;
+  temp2 =
+      input[5] * (tran_high_t)cospi_20_64 + input[3] * (tran_high_t)cospi_12_64;
   step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -1610,8 +1617,8 @@
 
   // stage 3 - odd half
   step1[4] = step2[4];
-  temp1 = (step2[6] - step2[5]) * cospi_16_64;
-  temp2 = (step2[5] + step2[6]) * cospi_16_64;
+  temp1 = (step2[6] - step2[5]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[5] + step2[6]) * (tran_high_t)cospi_16_64;
   step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[7] = step2[7];
@@ -1682,10 +1689,11 @@
                                 int stride, int bd) {
   int i, j;
   tran_high_t a1;
-  tran_low_t out =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+  tran_low_t out = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
 
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+  out =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 5);
   for (j = 0; j < 8; ++j) {
     for (i = 0; i < 8; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
@@ -1729,22 +1737,22 @@
   }
 
   // stage 1
-  s0 = x0 * cospi_1_64 + x1 * cospi_31_64;
-  s1 = x0 * cospi_31_64 - x1 * cospi_1_64;
-  s2 = x2 * cospi_5_64 + x3 * cospi_27_64;
-  s3 = x2 * cospi_27_64 - x3 * cospi_5_64;
-  s4 = x4 * cospi_9_64 + x5 * cospi_23_64;
-  s5 = x4 * cospi_23_64 - x5 * cospi_9_64;
-  s6 = x6 * cospi_13_64 + x7 * cospi_19_64;
-  s7 = x6 * cospi_19_64 - x7 * cospi_13_64;
-  s8 = x8 * cospi_17_64 + x9 * cospi_15_64;
-  s9 = x8 * cospi_15_64 - x9 * cospi_17_64;
-  s10 = x10 * cospi_21_64 + x11 * cospi_11_64;
-  s11 = x10 * cospi_11_64 - x11 * cospi_21_64;
-  s12 = x12 * cospi_25_64 + x13 * cospi_7_64;
-  s13 = x12 * cospi_7_64 - x13 * cospi_25_64;
-  s14 = x14 * cospi_29_64 + x15 * cospi_3_64;
-  s15 = x14 * cospi_3_64 - x15 * cospi_29_64;
+  s0 = x0 * (tran_high_t)cospi_1_64 + x1 * (tran_high_t)cospi_31_64;
+  s1 = x0 * (tran_high_t)cospi_31_64 - x1 * (tran_high_t)cospi_1_64;
+  s2 = x2 * (tran_high_t)cospi_5_64 + x3 * (tran_high_t)cospi_27_64;
+  s3 = x2 * (tran_high_t)cospi_27_64 - x3 * (tran_high_t)cospi_5_64;
+  s4 = x4 * (tran_high_t)cospi_9_64 + x5 * (tran_high_t)cospi_23_64;
+  s5 = x4 * (tran_high_t)cospi_23_64 - x5 * (tran_high_t)cospi_9_64;
+  s6 = x6 * (tran_high_t)cospi_13_64 + x7 * (tran_high_t)cospi_19_64;
+  s7 = x6 * (tran_high_t)cospi_19_64 - x7 * (tran_high_t)cospi_13_64;
+  s8 = x8 * (tran_high_t)cospi_17_64 + x9 * (tran_high_t)cospi_15_64;
+  s9 = x8 * (tran_high_t)cospi_15_64 - x9 * (tran_high_t)cospi_17_64;
+  s10 = x10 * (tran_high_t)cospi_21_64 + x11 * (tran_high_t)cospi_11_64;
+  s11 = x10 * (tran_high_t)cospi_11_64 - x11 * (tran_high_t)cospi_21_64;
+  s12 = x12 * (tran_high_t)cospi_25_64 + x13 * (tran_high_t)cospi_7_64;
+  s13 = x12 * (tran_high_t)cospi_7_64 - x13 * (tran_high_t)cospi_25_64;
+  s14 = x14 * (tran_high_t)cospi_29_64 + x15 * (tran_high_t)cospi_3_64;
+  s15 = x14 * (tran_high_t)cospi_3_64 - x15 * (tran_high_t)cospi_29_64;
 
   x0 = HIGHBD_WRAPLOW(dct_const_round_shift(s0 + s8), bd);
   x1 = HIGHBD_WRAPLOW(dct_const_round_shift(s1 + s9), bd);
@@ -1772,14 +1780,14 @@
   s5 = x5;
   s6 = x6;
   s7 = x7;
-  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
-  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
-  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;
-  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;
-  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
-  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
-  s14 = -x14 * cospi_12_64 + x15 * cospi_20_64;
-  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;
+  s8 = x8 * (tran_high_t)cospi_4_64 + x9 * (tran_high_t)cospi_28_64;
+  s9 = x8 * (tran_high_t)cospi_28_64 - x9 * (tran_high_t)cospi_4_64;
+  s10 = x10 * (tran_high_t)cospi_20_64 + x11 * (tran_high_t)cospi_12_64;
+  s11 = x10 * (tran_high_t)cospi_12_64 - x11 * (tran_high_t)cospi_20_64;
+  s12 = -x12 * (tran_high_t)cospi_28_64 + x13 * (tran_high_t)cospi_4_64;
+  s13 = x12 * (tran_high_t)cospi_4_64 + x13 * (tran_high_t)cospi_28_64;
+  s14 = -x14 * (tran_high_t)cospi_12_64 + x15 * (tran_high_t)cospi_20_64;
+  s15 = x14 * (tran_high_t)cospi_20_64 + x15 * (tran_high_t)cospi_12_64;
 
   x0 = HIGHBD_WRAPLOW(s0 + s4, bd);
   x1 = HIGHBD_WRAPLOW(s1 + s5, bd);
@@ -1803,18 +1811,18 @@
   s1 = x1;
   s2 = x2;
   s3 = x3;
-  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;
-  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;
-  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;
-  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;
+  s4 = x4 * (tran_high_t)cospi_8_64 + x5 * (tran_high_t)cospi_24_64;
+  s5 = x4 * (tran_high_t)cospi_24_64 - x5 * (tran_high_t)cospi_8_64;
+  s6 = -x6 * (tran_high_t)cospi_24_64 + x7 * (tran_high_t)cospi_8_64;
+  s7 = x6 * (tran_high_t)cospi_8_64 + x7 * (tran_high_t)cospi_24_64;
   s8 = x8;
   s9 = x9;
   s10 = x10;
   s11 = x11;
-  s12 = x12 * cospi_8_64 + x13 * cospi_24_64;
-  s13 = x12 * cospi_24_64 - x13 * cospi_8_64;
-  s14 = -x14 * cospi_24_64 + x15 * cospi_8_64;
-  s15 = x14 * cospi_8_64 + x15 * cospi_24_64;
+  s12 = x12 * (tran_high_t)cospi_8_64 + x13 * (tran_high_t)cospi_24_64;
+  s13 = x12 * (tran_high_t)cospi_24_64 - x13 * (tran_high_t)cospi_8_64;
+  s14 = -x14 * (tran_high_t)cospi_24_64 + x15 * (tran_high_t)cospi_8_64;
+  s15 = x14 * (tran_high_t)cospi_8_64 + x15 * (tran_high_t)cospi_24_64;
 
   x0 = HIGHBD_WRAPLOW(s0 + s2, bd);
   x1 = HIGHBD_WRAPLOW(s1 + s3, bd);
@@ -1834,14 +1842,14 @@
   x15 = HIGHBD_WRAPLOW(dct_const_round_shift(s13 - s15), bd);
 
   // stage 4
-  s2 = (-cospi_16_64) * (x2 + x3);
-  s3 = cospi_16_64 * (x2 - x3);
-  s6 = cospi_16_64 * (x6 + x7);
-  s7 = cospi_16_64 * (-x6 + x7);
-  s10 = cospi_16_64 * (x10 + x11);
-  s11 = cospi_16_64 * (-x10 + x11);
-  s14 = (-cospi_16_64) * (x14 + x15);
-  s15 = cospi_16_64 * (x14 - x15);
+  s2 = (tran_high_t)(-cospi_16_64) * (x2 + x3);
+  s3 = (tran_high_t)cospi_16_64 * (x2 - x3);
+  s6 = (tran_high_t)cospi_16_64 * (x6 + x7);
+  s7 = (tran_high_t)cospi_16_64 * (-x6 + x7);
+  s10 = (tran_high_t)cospi_16_64 * (x10 + x11);
+  s11 = (tran_high_t)cospi_16_64 * (-x10 + x11);
+  s14 = (tran_high_t)(-cospi_16_64) * (x14 + x15);
+  s15 = (tran_high_t)cospi_16_64 * (x14 - x15);
 
   x2 = HIGHBD_WRAPLOW(dct_const_round_shift(s2), bd);
   x3 = HIGHBD_WRAPLOW(dct_const_round_shift(s3), bd);
@@ -1911,23 +1919,31 @@
   step2[6] = step1[6];
   step2[7] = step1[7];
 
-  temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
-  temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+  temp1 =
+      step1[8] * (tran_high_t)cospi_30_64 - step1[15] * (tran_high_t)cospi_2_64;
+  temp2 =
+      step1[8] * (tran_high_t)cospi_2_64 + step1[15] * (tran_high_t)cospi_30_64;
   step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
-  temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+  temp1 = step1[9] * (tran_high_t)cospi_14_64 -
+          step1[14] * (tran_high_t)cospi_18_64;
+  temp2 = step1[9] * (tran_high_t)cospi_18_64 +
+          step1[14] * (tran_high_t)cospi_14_64;
   step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
-  temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+  temp1 = step1[10] * (tran_high_t)cospi_22_64 -
+          step1[13] * (tran_high_t)cospi_10_64;
+  temp2 = step1[10] * (tran_high_t)cospi_10_64 +
+          step1[13] * (tran_high_t)cospi_22_64;
   step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
-  temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+  temp1 = step1[11] * (tran_high_t)cospi_6_64 -
+          step1[12] * (tran_high_t)cospi_26_64;
+  temp2 = step1[11] * (tran_high_t)cospi_26_64 +
+          step1[12] * (tran_high_t)cospi_6_64;
   step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -1937,12 +1953,16 @@
   step1[2] = step2[2];
   step1[3] = step2[3];
 
-  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
-  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+  temp1 =
+      step2[4] * (tran_high_t)cospi_28_64 - step2[7] * (tran_high_t)cospi_4_64;
+  temp2 =
+      step2[4] * (tran_high_t)cospi_4_64 + step2[7] * (tran_high_t)cospi_28_64;
   step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
-  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+  temp1 =
+      step2[5] * (tran_high_t)cospi_12_64 - step2[6] * (tran_high_t)cospi_20_64;
+  temp2 =
+      step2[5] * (tran_high_t)cospi_20_64 + step2[6] * (tran_high_t)cospi_12_64;
   step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -1956,12 +1976,14 @@
   step1[15] = HIGHBD_WRAPLOW(step2[14] + step2[15], bd);
 
   // stage 4
-  temp1 = (step1[0] + step1[1]) * cospi_16_64;
-  temp2 = (step1[0] - step1[1]) * cospi_16_64;
+  temp1 = (step1[0] + step1[1]) * (tran_high_t)cospi_16_64;
+  temp2 = (step1[0] - step1[1]) * (tran_high_t)cospi_16_64;
   step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
-  temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+  temp1 =
+      step1[2] * (tran_high_t)cospi_24_64 - step1[3] * (tran_high_t)cospi_8_64;
+  temp2 =
+      step1[2] * (tran_high_t)cospi_8_64 + step1[3] * (tran_high_t)cospi_24_64;
   step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -1971,12 +1993,16 @@
 
   step2[8] = step1[8];
   step2[15] = step1[15];
-  temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
-  temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+  temp1 = -step1[9] * (tran_high_t)cospi_8_64 +
+          step1[14] * (tran_high_t)cospi_24_64;
+  temp2 =
+      step1[9] * (tran_high_t)cospi_24_64 + step1[14] * (tran_high_t)cospi_8_64;
   step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
-  temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+  temp1 = -step1[10] * (tran_high_t)cospi_24_64 -
+          step1[13] * (tran_high_t)cospi_8_64;
+  temp2 = -step1[10] * (tran_high_t)cospi_8_64 +
+          step1[13] * (tran_high_t)cospi_24_64;
   step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step2[11] = step1[11];
@@ -1988,8 +2014,8 @@
   step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
   step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
   step1[4] = step2[4];
-  temp1 = (step2[6] - step2[5]) * cospi_16_64;
-  temp2 = (step2[5] + step2[6]) * cospi_16_64;
+  temp1 = (step2[6] - step2[5]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[5] + step2[6]) * (tran_high_t)cospi_16_64;
   step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[7] = step2[7];
@@ -2014,12 +2040,12 @@
   step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
   step2[8] = step1[8];
   step2[9] = step1[9];
-  temp1 = (-step1[10] + step1[13]) * cospi_16_64;
-  temp2 = (step1[10] + step1[13]) * cospi_16_64;
+  temp1 = (-step1[10] + step1[13]) * (tran_high_t)cospi_16_64;
+  temp2 = (step1[10] + step1[13]) * (tran_high_t)cospi_16_64;
   step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = (-step1[11] + step1[12]) * cospi_16_64;
-  temp2 = (step1[11] + step1[12]) * cospi_16_64;
+  temp1 = (-step1[11] + step1[12]) * (tran_high_t)cospi_16_64;
+  temp2 = (step1[11] + step1[12]) * (tran_high_t)cospi_16_64;
   step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step2[14] = step1[14];
@@ -2127,10 +2153,11 @@
                                   int stride, int bd) {
   int i, j;
   tran_high_t a1;
-  tran_low_t out =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+  tran_low_t out = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
 
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+  out =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
   for (j = 0; j < 16; ++j) {
     for (i = 0; i < 16; ++i) dest[i] = highbd_clip_pixel_add(dest[i], a1, bd);
@@ -2170,43 +2197,59 @@
   step1[14] = input[14];
   step1[15] = input[30];
 
-  temp1 = input[1] * cospi_31_64 - input[31] * cospi_1_64;
-  temp2 = input[1] * cospi_1_64 + input[31] * cospi_31_64;
+  temp1 =
+      input[1] * (tran_high_t)cospi_31_64 - input[31] * (tran_high_t)cospi_1_64;
+  temp2 =
+      input[1] * (tran_high_t)cospi_1_64 + input[31] * (tran_high_t)cospi_31_64;
   step1[16] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[31] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[17] * cospi_15_64 - input[15] * cospi_17_64;
-  temp2 = input[17] * cospi_17_64 + input[15] * cospi_15_64;
+  temp1 = input[17] * (tran_high_t)cospi_15_64 -
+          input[15] * (tran_high_t)cospi_17_64;
+  temp2 = input[17] * (tran_high_t)cospi_17_64 +
+          input[15] * (tran_high_t)cospi_15_64;
   step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[9] * cospi_23_64 - input[23] * cospi_9_64;
-  temp2 = input[9] * cospi_9_64 + input[23] * cospi_23_64;
+  temp1 =
+      input[9] * (tran_high_t)cospi_23_64 - input[23] * (tran_high_t)cospi_9_64;
+  temp2 =
+      input[9] * (tran_high_t)cospi_9_64 + input[23] * (tran_high_t)cospi_23_64;
   step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[25] * cospi_7_64 - input[7] * cospi_25_64;
-  temp2 = input[25] * cospi_25_64 + input[7] * cospi_7_64;
+  temp1 =
+      input[25] * (tran_high_t)cospi_7_64 - input[7] * (tran_high_t)cospi_25_64;
+  temp2 =
+      input[25] * (tran_high_t)cospi_25_64 + input[7] * (tran_high_t)cospi_7_64;
   step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[5] * cospi_27_64 - input[27] * cospi_5_64;
-  temp2 = input[5] * cospi_5_64 + input[27] * cospi_27_64;
+  temp1 =
+      input[5] * (tran_high_t)cospi_27_64 - input[27] * (tran_high_t)cospi_5_64;
+  temp2 =
+      input[5] * (tran_high_t)cospi_5_64 + input[27] * (tran_high_t)cospi_27_64;
   step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[21] * cospi_11_64 - input[11] * cospi_21_64;
-  temp2 = input[21] * cospi_21_64 + input[11] * cospi_11_64;
+  temp1 = input[21] * (tran_high_t)cospi_11_64 -
+          input[11] * (tran_high_t)cospi_21_64;
+  temp2 = input[21] * (tran_high_t)cospi_21_64 +
+          input[11] * (tran_high_t)cospi_11_64;
   step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[13] * cospi_19_64 - input[19] * cospi_13_64;
-  temp2 = input[13] * cospi_13_64 + input[19] * cospi_19_64;
+  temp1 = input[13] * (tran_high_t)cospi_19_64 -
+          input[19] * (tran_high_t)cospi_13_64;
+  temp2 = input[13] * (tran_high_t)cospi_13_64 +
+          input[19] * (tran_high_t)cospi_19_64;
   step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = input[29] * cospi_3_64 - input[3] * cospi_29_64;
-  temp2 = input[29] * cospi_29_64 + input[3] * cospi_3_64;
+  temp1 =
+      input[29] * (tran_high_t)cospi_3_64 - input[3] * (tran_high_t)cospi_29_64;
+  temp2 =
+      input[29] * (tran_high_t)cospi_29_64 + input[3] * (tran_high_t)cospi_3_64;
   step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -2220,23 +2263,31 @@
   step2[6] = step1[6];
   step2[7] = step1[7];
 
-  temp1 = step1[8] * cospi_30_64 - step1[15] * cospi_2_64;
-  temp2 = step1[8] * cospi_2_64 + step1[15] * cospi_30_64;
+  temp1 =
+      step1[8] * (tran_high_t)cospi_30_64 - step1[15] * (tran_high_t)cospi_2_64;
+  temp2 =
+      step1[8] * (tran_high_t)cospi_2_64 + step1[15] * (tran_high_t)cospi_30_64;
   step2[8] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[15] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = step1[9] * cospi_14_64 - step1[14] * cospi_18_64;
-  temp2 = step1[9] * cospi_18_64 + step1[14] * cospi_14_64;
+  temp1 = step1[9] * (tran_high_t)cospi_14_64 -
+          step1[14] * (tran_high_t)cospi_18_64;
+  temp2 = step1[9] * (tran_high_t)cospi_18_64 +
+          step1[14] * (tran_high_t)cospi_14_64;
   step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = step1[10] * cospi_22_64 - step1[13] * cospi_10_64;
-  temp2 = step1[10] * cospi_10_64 + step1[13] * cospi_22_64;
+  temp1 = step1[10] * (tran_high_t)cospi_22_64 -
+          step1[13] * (tran_high_t)cospi_10_64;
+  temp2 = step1[10] * (tran_high_t)cospi_10_64 +
+          step1[13] * (tran_high_t)cospi_22_64;
   step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
-  temp1 = step1[11] * cospi_6_64 - step1[12] * cospi_26_64;
-  temp2 = step1[11] * cospi_26_64 + step1[12] * cospi_6_64;
+  temp1 = step1[11] * (tran_high_t)cospi_6_64 -
+          step1[12] * (tran_high_t)cospi_26_64;
+  temp2 = step1[11] * (tran_high_t)cospi_26_64 +
+          step1[12] * (tran_high_t)cospi_6_64;
   step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -2263,12 +2314,16 @@
   step1[2] = step2[2];
   step1[3] = step2[3];
 
-  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
-  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
+  temp1 =
+      step2[4] * (tran_high_t)cospi_28_64 - step2[7] * (tran_high_t)cospi_4_64;
+  temp2 =
+      step2[4] * (tran_high_t)cospi_4_64 + step2[7] * (tran_high_t)cospi_28_64;
   step1[4] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[7] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;
-  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;
+  temp1 =
+      step2[5] * (tran_high_t)cospi_12_64 - step2[6] * (tran_high_t)cospi_20_64;
+  temp2 =
+      step2[5] * (tran_high_t)cospi_20_64 + step2[6] * (tran_high_t)cospi_12_64;
   step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
 
@@ -2283,22 +2338,30 @@
 
   step1[16] = step2[16];
   step1[31] = step2[31];
-  temp1 = -step2[17] * cospi_4_64 + step2[30] * cospi_28_64;
-  temp2 = step2[17] * cospi_28_64 + step2[30] * cospi_4_64;
+  temp1 = -step2[17] * (tran_high_t)cospi_4_64 +
+          step2[30] * (tran_high_t)cospi_28_64;
+  temp2 = step2[17] * (tran_high_t)cospi_28_64 +
+          step2[30] * (tran_high_t)cospi_4_64;
   step1[17] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[30] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step2[18] * cospi_28_64 - step2[29] * cospi_4_64;
-  temp2 = -step2[18] * cospi_4_64 + step2[29] * cospi_28_64;
+  temp1 = -step2[18] * (tran_high_t)cospi_28_64 -
+          step2[29] * (tran_high_t)cospi_4_64;
+  temp2 = -step2[18] * (tran_high_t)cospi_4_64 +
+          step2[29] * (tran_high_t)cospi_28_64;
   step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  temp1 = -step2[21] * cospi_20_64 + step2[26] * cospi_12_64;
-  temp2 = step2[21] * cospi_12_64 + step2[26] * cospi_20_64;
+  temp1 = -step2[21] * (tran_high_t)cospi_20_64 +
+          step2[26] * (tran_high_t)cospi_12_64;
+  temp2 = step2[21] * (tran_high_t)cospi_12_64 +
+          step2[26] * (tran_high_t)cospi_20_64;
   step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step2[22] * cospi_12_64 - step2[25] * cospi_20_64;
-  temp2 = -step2[22] * cospi_20_64 + step2[25] * cospi_12_64;
+  temp1 = -step2[22] * (tran_high_t)cospi_12_64 -
+          step2[25] * (tran_high_t)cospi_20_64;
+  temp2 = -step2[22] * (tran_high_t)cospi_20_64 +
+          step2[25] * (tran_high_t)cospi_12_64;
   step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[23] = step2[23];
@@ -2307,12 +2370,14 @@
   step1[28] = step2[28];
 
   // stage 4
-  temp1 = (step1[0] + step1[1]) * cospi_16_64;
-  temp2 = (step1[0] - step1[1]) * cospi_16_64;
+  temp1 = (step1[0] + step1[1]) * (tran_high_t)cospi_16_64;
+  temp2 = (step1[0] - step1[1]) * (tran_high_t)cospi_16_64;
   step2[0] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[1] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = step1[2] * cospi_24_64 - step1[3] * cospi_8_64;
-  temp2 = step1[2] * cospi_8_64 + step1[3] * cospi_24_64;
+  temp1 =
+      step1[2] * (tran_high_t)cospi_24_64 - step1[3] * (tran_high_t)cospi_8_64;
+  temp2 =
+      step1[2] * (tran_high_t)cospi_8_64 + step1[3] * (tran_high_t)cospi_24_64;
   step2[2] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[3] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step2[4] = HIGHBD_WRAPLOW(step1[4] + step1[5], bd);
@@ -2322,12 +2387,16 @@
 
   step2[8] = step1[8];
   step2[15] = step1[15];
-  temp1 = -step1[9] * cospi_8_64 + step1[14] * cospi_24_64;
-  temp2 = step1[9] * cospi_24_64 + step1[14] * cospi_8_64;
+  temp1 = -step1[9] * (tran_high_t)cospi_8_64 +
+          step1[14] * (tran_high_t)cospi_24_64;
+  temp2 =
+      step1[9] * (tran_high_t)cospi_24_64 + step1[14] * (tran_high_t)cospi_8_64;
   step2[9] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[14] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step1[10] * cospi_24_64 - step1[13] * cospi_8_64;
-  temp2 = -step1[10] * cospi_8_64 + step1[13] * cospi_24_64;
+  temp1 = -step1[10] * (tran_high_t)cospi_24_64 -
+          step1[13] * (tran_high_t)cospi_8_64;
+  temp2 = -step1[10] * (tran_high_t)cospi_8_64 +
+          step1[13] * (tran_high_t)cospi_24_64;
   step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step2[11] = step1[11];
@@ -2357,8 +2426,8 @@
   step1[2] = HIGHBD_WRAPLOW(step2[1] - step2[2], bd);
   step1[3] = HIGHBD_WRAPLOW(step2[0] - step2[3], bd);
   step1[4] = step2[4];
-  temp1 = (step2[6] - step2[5]) * cospi_16_64;
-  temp2 = (step2[5] + step2[6]) * cospi_16_64;
+  temp1 = (step2[6] - step2[5]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[5] + step2[6]) * (tran_high_t)cospi_16_64;
   step1[5] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[6] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[7] = step2[7];
@@ -2374,20 +2443,28 @@
 
   step1[16] = step2[16];
   step1[17] = step2[17];
-  temp1 = -step2[18] * cospi_8_64 + step2[29] * cospi_24_64;
-  temp2 = step2[18] * cospi_24_64 + step2[29] * cospi_8_64;
+  temp1 = -step2[18] * (tran_high_t)cospi_8_64 +
+          step2[29] * (tran_high_t)cospi_24_64;
+  temp2 = step2[18] * (tran_high_t)cospi_24_64 +
+          step2[29] * (tran_high_t)cospi_8_64;
   step1[18] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[29] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step2[19] * cospi_8_64 + step2[28] * cospi_24_64;
-  temp2 = step2[19] * cospi_24_64 + step2[28] * cospi_8_64;
+  temp1 = -step2[19] * (tran_high_t)cospi_8_64 +
+          step2[28] * (tran_high_t)cospi_24_64;
+  temp2 = step2[19] * (tran_high_t)cospi_24_64 +
+          step2[28] * (tran_high_t)cospi_8_64;
   step1[19] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[28] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step2[20] * cospi_24_64 - step2[27] * cospi_8_64;
-  temp2 = -step2[20] * cospi_8_64 + step2[27] * cospi_24_64;
+  temp1 = -step2[20] * (tran_high_t)cospi_24_64 -
+          step2[27] * (tran_high_t)cospi_8_64;
+  temp2 = -step2[20] * (tran_high_t)cospi_8_64 +
+          step2[27] * (tran_high_t)cospi_24_64;
   step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = -step2[21] * cospi_24_64 - step2[26] * cospi_8_64;
-  temp2 = -step2[21] * cospi_8_64 + step2[26] * cospi_24_64;
+  temp1 = -step2[21] * (tran_high_t)cospi_24_64 -
+          step2[26] * (tran_high_t)cospi_8_64;
+  temp2 = -step2[21] * (tran_high_t)cospi_8_64 +
+          step2[26] * (tran_high_t)cospi_24_64;
   step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[22] = step2[22];
@@ -2408,12 +2485,12 @@
   step2[7] = HIGHBD_WRAPLOW(step1[0] - step1[7], bd);
   step2[8] = step1[8];
   step2[9] = step1[9];
-  temp1 = (-step1[10] + step1[13]) * cospi_16_64;
-  temp2 = (step1[10] + step1[13]) * cospi_16_64;
+  temp1 = (-step1[10] + step1[13]) * (tran_high_t)cospi_16_64;
+  temp2 = (step1[10] + step1[13]) * (tran_high_t)cospi_16_64;
   step2[10] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[13] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = (-step1[11] + step1[12]) * cospi_16_64;
-  temp2 = (step1[11] + step1[12]) * cospi_16_64;
+  temp1 = (-step1[11] + step1[12]) * (tran_high_t)cospi_16_64;
+  temp2 = (step1[11] + step1[12]) * (tran_high_t)cospi_16_64;
   step2[11] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step2[12] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step2[14] = step1[14];
@@ -2459,20 +2536,20 @@
   step1[17] = step2[17];
   step1[18] = step2[18];
   step1[19] = step2[19];
-  temp1 = (-step2[20] + step2[27]) * cospi_16_64;
-  temp2 = (step2[20] + step2[27]) * cospi_16_64;
+  temp1 = (-step2[20] + step2[27]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[20] + step2[27]) * (tran_high_t)cospi_16_64;
   step1[20] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[27] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = (-step2[21] + step2[26]) * cospi_16_64;
-  temp2 = (step2[21] + step2[26]) * cospi_16_64;
+  temp1 = (-step2[21] + step2[26]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[21] + step2[26]) * (tran_high_t)cospi_16_64;
   step1[21] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[26] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = (-step2[22] + step2[25]) * cospi_16_64;
-  temp2 = (step2[22] + step2[25]) * cospi_16_64;
+  temp1 = (-step2[22] + step2[25]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[22] + step2[25]) * (tran_high_t)cospi_16_64;
   step1[22] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[25] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
-  temp1 = (-step2[23] + step2[24]) * cospi_16_64;
-  temp2 = (step2[23] + step2[24]) * cospi_16_64;
+  temp1 = (-step2[23] + step2[24]) * (tran_high_t)cospi_16_64;
+  temp2 = (step2[23] + step2[24]) * (tran_high_t)cospi_16_64;
   step1[23] = HIGHBD_WRAPLOW(dct_const_round_shift(temp1), bd);
   step1[24] = HIGHBD_WRAPLOW(dct_const_round_shift(temp2), bd);
   step1[28] = step2[28];
@@ -2604,10 +2681,11 @@
                                   int stride, int bd) {
   int i, j;
   int a1;
-  tran_low_t out =
-      HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
+  tran_low_t out = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
 
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+  out =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 6);
 
   for (j = 0; j < 32; ++j) {
--- a/vpx_dsp/txfm_common.h
+++ b/vpx_dsp/txfm_common.h
@@ -25,37 +25,37 @@
 //    printf("static const int cospi_%d_64 = %.0f;\n", i,
 //           round(16384 * cos(i*M_PI/64)));
 // Note: sin(k*Pi/64) = cos((32-k)*Pi/64)
-static const tran_high_t cospi_1_64 = 16364;
-static const tran_high_t cospi_2_64 = 16305;
-static const tran_high_t cospi_3_64 = 16207;
-static const tran_high_t cospi_4_64 = 16069;
-static const tran_high_t cospi_5_64 = 15893;
-static const tran_high_t cospi_6_64 = 15679;
-static const tran_high_t cospi_7_64 = 15426;
-static const tran_high_t cospi_8_64 = 15137;
-static const tran_high_t cospi_9_64 = 14811;
-static const tran_high_t cospi_10_64 = 14449;
-static const tran_high_t cospi_11_64 = 14053;
-static const tran_high_t cospi_12_64 = 13623;
-static const tran_high_t cospi_13_64 = 13160;
-static const tran_high_t cospi_14_64 = 12665;
-static const tran_high_t cospi_15_64 = 12140;
-static const tran_high_t cospi_16_64 = 11585;
-static const tran_high_t cospi_17_64 = 11003;
-static const tran_high_t cospi_18_64 = 10394;
-static const tran_high_t cospi_19_64 = 9760;
-static const tran_high_t cospi_20_64 = 9102;
-static const tran_high_t cospi_21_64 = 8423;
-static const tran_high_t cospi_22_64 = 7723;
-static const tran_high_t cospi_23_64 = 7005;
-static const tran_high_t cospi_24_64 = 6270;
-static const tran_high_t cospi_25_64 = 5520;
-static const tran_high_t cospi_26_64 = 4756;
-static const tran_high_t cospi_27_64 = 3981;
-static const tran_high_t cospi_28_64 = 3196;
-static const tran_high_t cospi_29_64 = 2404;
-static const tran_high_t cospi_30_64 = 1606;
-static const tran_high_t cospi_31_64 = 804;
+static const tran_coef_t cospi_1_64 = 16364;
+static const tran_coef_t cospi_2_64 = 16305;
+static const tran_coef_t cospi_3_64 = 16207;
+static const tran_coef_t cospi_4_64 = 16069;
+static const tran_coef_t cospi_5_64 = 15893;
+static const tran_coef_t cospi_6_64 = 15679;
+static const tran_coef_t cospi_7_64 = 15426;
+static const tran_coef_t cospi_8_64 = 15137;
+static const tran_coef_t cospi_9_64 = 14811;
+static const tran_coef_t cospi_10_64 = 14449;
+static const tran_coef_t cospi_11_64 = 14053;
+static const tran_coef_t cospi_12_64 = 13623;
+static const tran_coef_t cospi_13_64 = 13160;
+static const tran_coef_t cospi_14_64 = 12665;
+static const tran_coef_t cospi_15_64 = 12140;
+static const tran_coef_t cospi_16_64 = 11585;
+static const tran_coef_t cospi_17_64 = 11003;
+static const tran_coef_t cospi_18_64 = 10394;
+static const tran_coef_t cospi_19_64 = 9760;
+static const tran_coef_t cospi_20_64 = 9102;
+static const tran_coef_t cospi_21_64 = 8423;
+static const tran_coef_t cospi_22_64 = 7723;
+static const tran_coef_t cospi_23_64 = 7005;
+static const tran_coef_t cospi_24_64 = 6270;
+static const tran_coef_t cospi_25_64 = 5520;
+static const tran_coef_t cospi_26_64 = 4756;
+static const tran_coef_t cospi_27_64 = 3981;
+static const tran_coef_t cospi_28_64 = 3196;
+static const tran_coef_t cospi_29_64 = 2404;
+static const tran_coef_t cospi_30_64 = 1606;
+static const tran_coef_t cospi_31_64 = 804;
 
 //  16384 * sqrt(2) * sin(kPi/9) * 2 / 3
 static const tran_coef_t sinpi_1_9 = 5283;
--- a/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h
+++ b/vpx_dsp/x86/fwd_dct32x32_impl_avx2.h
@@ -51,7 +51,7 @@
   //    When we use them, in one case, they are all the same. In all others
   //    it's a pair of them that we need to repeat four times. This is done
   //    by constructing the 32 bit constant corresponding to that pair.
-  const __m256i k__cospi_p16_p16 = _mm256_set1_epi16((int16_t)cospi_16_64);
+  const __m256i k__cospi_p16_p16 = _mm256_set1_epi16(cospi_16_64);
   const __m256i k__cospi_p16_m16 =
       pair256_set_epi16(+cospi_16_64, -cospi_16_64);
   const __m256i k__cospi_m08_p24 = pair256_set_epi16(-cospi_8_64, cospi_24_64);
--- a/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h
+++ b/vpx_dsp/x86/fwd_dct32x32_impl_sse2.h
@@ -63,7 +63,7 @@
   //    When we use them, in one case, they are all the same. In all others
   //    it's a pair of them that we need to repeat four times. This is done
   //    by constructing the 32 bit constant corresponding to that pair.
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
   const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
--- a/vpx_dsp/x86/fwd_txfm_impl_sse2.h
+++ b/vpx_dsp/x86/fwd_txfm_impl_sse2.h
@@ -261,7 +261,7 @@
   //    When we use them, in one case, they are all the same. In all others
   //    it's a pair of them that we need to repeat four times. This is done
   //    by constructing the 32 bit constant corresponding to that pair.
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
   const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
@@ -582,7 +582,7 @@
   //    When we use them, in one case, they are all the same. In all others
   //    it's a pair of them that we need to repeat four times. This is done
   //    by constructing the 32 bit constant corresponding to that pair.
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
   const __m128i k__cospi_p08_m24 = pair_set_epi16(cospi_8_64, -cospi_24_64);
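
Note on the hunks above: pair_set_epi16(a, b) is the helper the repeated comment refers to -- the 16-bit pair (a, b) is replicated into every 32-bit lane of the register (the usual purpose is to feed _mm_madd_epi16 with interleaved inputs, though that code is not shown here). With cospi_*_64 now a 16-bit tran_coef_t, no narrowing cast is needed at these call sites. A rough illustration of the "32 bit constant corresponding to that pair" (a sketch only; pair_set_epi16_sketch is a hypothetical name and the actual vpx_dsp macro may be written differently):

#include <emmintrin.h>
#include <stdint.h>

static __m128i pair_set_epi16_sketch(int16_t a, int16_t b) {
  /* Each 32-bit lane holds b in its upper 16 bits and a in its lower 16 bits,
   * repeated four times across the 128-bit register. */
  const int32_t pair = (int32_t)(((uint32_t)(uint16_t)b << 16) | (uint16_t)a);
  return _mm_set1_epi32(pair);
}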
--- a/vpx_dsp/x86/highbd_idct16x16_add_sse2.c
+++ b/vpx_dsp/x86/highbd_idct16x16_add_sse2.c
@@ -56,20 +56,20 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  highbd_butterfly_sse2(io[1], io[15], (int)cospi_30_64, (int)cospi_2_64,
-                        &step2[8], &step2[15]);
-  highbd_butterfly_sse2(io[9], io[7], (int)cospi_14_64, (int)cospi_18_64,
-                        &step2[9], &step2[14]);
-  highbd_butterfly_sse2(io[5], io[11], (int)cospi_22_64, (int)cospi_10_64,
-                        &step2[10], &step2[13]);
-  highbd_butterfly_sse2(io[13], io[3], (int)cospi_6_64, (int)cospi_26_64,
-                        &step2[11], &step2[12]);
+  highbd_butterfly_sse2(io[1], io[15], cospi_30_64, cospi_2_64, &step2[8],
+                        &step2[15]);
+  highbd_butterfly_sse2(io[9], io[7], cospi_14_64, cospi_18_64, &step2[9],
+                        &step2[14]);
+  highbd_butterfly_sse2(io[5], io[11], cospi_22_64, cospi_10_64, &step2[10],
+                        &step2[13]);
+  highbd_butterfly_sse2(io[13], io[3], cospi_6_64, cospi_26_64, &step2[11],
+                        &step2[12]);
 
   // stage 3
-  highbd_butterfly_sse2(io[2], io[14], (int)cospi_28_64, (int)cospi_4_64,
-                        &step1[4], &step1[7]);
-  highbd_butterfly_sse2(io[10], io[6], (int)cospi_12_64, (int)cospi_20_64,
-                        &step1[5], &step1[6]);
+  highbd_butterfly_sse2(io[2], io[14], cospi_28_64, cospi_4_64, &step1[4],
+                        &step1[7]);
+  highbd_butterfly_sse2(io[10], io[6], cospi_12_64, cospi_20_64, &step1[5],
+                        &step1[6]);
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
   step1[9] = _mm_sub_epi32(step2[8], step2[9]);
   step1[10] = _mm_sub_epi32(step2[10], step2[11]);  // step1[10] = -step1[10]
@@ -81,11 +81,11 @@
 
   // stage 4
   highbd_butterfly_cospi16_sse2(io[0], io[8], &step2[0], &step2[1]);
-  highbd_butterfly_sse2(io[4], io[12], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[2], &step2[3]);
-  highbd_butterfly_sse2(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[9], &step2[14]);
-  highbd_butterfly_sse2(step1[10], step1[13], (int)cospi_8_64, (int)cospi_24_64,
+  highbd_butterfly_sse2(io[4], io[12], cospi_24_64, cospi_8_64, &step2[2],
+                        &step2[3]);
+  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
+                        &step2[14]);
+  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,
                         &step2[13], &step2[10]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step1[4] = _mm_add_epi32(step1[4], step1[5]);
@@ -106,20 +106,20 @@
   __m128i temp1[2], sign[2];
 
   // stage 2
-  highbd_partial_butterfly_sse2(io[1], (int)cospi_30_64, (int)cospi_2_64,
-                                &step2[8], &step2[15]);
-  highbd_partial_butterfly_neg_sse2(io[7], (int)cospi_14_64, (int)cospi_18_64,
-                                    &step2[9], &step2[14]);
-  highbd_partial_butterfly_sse2(io[5], (int)cospi_22_64, (int)cospi_10_64,
-                                &step2[10], &step2[13]);
-  highbd_partial_butterfly_neg_sse2(io[3], (int)cospi_6_64, (int)cospi_26_64,
-                                    &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse2(io[1], cospi_30_64, cospi_2_64, &step2[8],
+                                &step2[15]);
+  highbd_partial_butterfly_neg_sse2(io[7], cospi_14_64, cospi_18_64, &step2[9],
+                                    &step2[14]);
+  highbd_partial_butterfly_sse2(io[5], cospi_22_64, cospi_10_64, &step2[10],
+                                &step2[13]);
+  highbd_partial_butterfly_neg_sse2(io[3], cospi_6_64, cospi_26_64, &step2[11],
+                                    &step2[12]);
 
   // stage 3
-  highbd_partial_butterfly_sse2(io[2], (int)cospi_28_64, (int)cospi_4_64,
-                                &step1[4], &step1[7]);
-  highbd_partial_butterfly_neg_sse2(io[6], (int)cospi_12_64, (int)cospi_20_64,
-                                    &step1[5], &step1[6]);
+  highbd_partial_butterfly_sse2(io[2], cospi_28_64, cospi_4_64, &step1[4],
+                                &step1[7]);
+  highbd_partial_butterfly_neg_sse2(io[6], cospi_12_64, cospi_20_64, &step1[5],
+                                    &step1[6]);
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
   step1[9] = _mm_sub_epi32(step2[8], step2[9]);
   step1[10] = _mm_sub_epi32(step2[10], step2[11]);  // step1[10] = -step1[10]
@@ -131,13 +131,13 @@
 
   // stage 4
   abs_extend_64bit_sse2(io[0], temp1, sign);
-  step2[0] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_16_64);
+  step2[0] = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
   step2[1] = step2[0];
-  highbd_partial_butterfly_sse2(io[4], (int)cospi_24_64, (int)cospi_8_64,
-                                &step2[2], &step2[3]);
-  highbd_butterfly_sse2(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[9], &step2[14]);
-  highbd_butterfly_sse2(step1[10], step1[13], (int)cospi_8_64, (int)cospi_24_64,
+  highbd_partial_butterfly_sse2(io[4], cospi_24_64, cospi_8_64, &step2[2],
+                                &step2[3]);
+  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
+                        &step2[14]);
+  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,
                         &step2[13], &step2[10]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step1[4] = _mm_add_epi32(step1[4], step1[5]);
@@ -158,14 +158,14 @@
   __m128i temp[2], sign[2];
 
   // stage 2
-  highbd_partial_butterfly_sse2(io[1], (int)cospi_30_64, (int)cospi_2_64,
-                                &step2[8], &step2[15]);
-  highbd_partial_butterfly_neg_sse2(io[3], (int)cospi_6_64, (int)cospi_26_64,
-                                    &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse2(io[1], cospi_30_64, cospi_2_64, &step2[8],
+                                &step2[15]);
+  highbd_partial_butterfly_neg_sse2(io[3], cospi_6_64, cospi_26_64, &step2[11],
+                                    &step2[12]);
 
   // stage 3
-  highbd_partial_butterfly_sse2(io[2], (int)cospi_28_64, (int)cospi_4_64,
-                                &step1[4], &step1[7]);
+  highbd_partial_butterfly_sse2(io[2], cospi_28_64, cospi_4_64, &step1[4],
+                                &step1[7]);
   step1[8] = step2[8];
   step1[9] = step2[8];
   step1[10] =
@@ -179,13 +179,13 @@
 
   // stage 4
   abs_extend_64bit_sse2(io[0], temp, sign);
-  step2[0] = multiplication_round_shift_sse2(temp, sign, (int)cospi_16_64);
+  step2[0] = multiplication_round_shift_sse2(temp, sign, cospi_16_64);
   step2[1] = step2[0];
   step2[2] = _mm_setzero_si128();
   step2[3] = _mm_setzero_si128();
-  highbd_butterfly_sse2(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[9], &step2[14]);
-  highbd_butterfly_sse2(step1[10], step1[13], (int)cospi_8_64, (int)cospi_24_64,
+  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
+                        &step2[14]);
+  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,
                         &step2[13], &step2[10]);
   step2[5] = step1[4];
   step2[6] = step1[7];
--- a/vpx_dsp/x86/highbd_idct16x16_add_sse4.c
+++ b/vpx_dsp/x86/highbd_idct16x16_add_sse4.c
@@ -57,20 +57,20 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  highbd_butterfly_sse4_1(io[1], io[15], (int)cospi_30_64, (int)cospi_2_64,
-                          &step2[8], &step2[15]);
-  highbd_butterfly_sse4_1(io[9], io[7], (int)cospi_14_64, (int)cospi_18_64,
-                          &step2[9], &step2[14]);
-  highbd_butterfly_sse4_1(io[5], io[11], (int)cospi_22_64, (int)cospi_10_64,
-                          &step2[10], &step2[13]);
-  highbd_butterfly_sse4_1(io[13], io[3], (int)cospi_6_64, (int)cospi_26_64,
-                          &step2[11], &step2[12]);
+  highbd_butterfly_sse4_1(io[1], io[15], cospi_30_64, cospi_2_64, &step2[8],
+                          &step2[15]);
+  highbd_butterfly_sse4_1(io[9], io[7], cospi_14_64, cospi_18_64, &step2[9],
+                          &step2[14]);
+  highbd_butterfly_sse4_1(io[5], io[11], cospi_22_64, cospi_10_64, &step2[10],
+                          &step2[13]);
+  highbd_butterfly_sse4_1(io[13], io[3], cospi_6_64, cospi_26_64, &step2[11],
+                          &step2[12]);
 
   // stage 3
-  highbd_butterfly_sse4_1(io[2], io[14], (int)cospi_28_64, (int)cospi_4_64,
-                          &step1[4], &step1[7]);
-  highbd_butterfly_sse4_1(io[10], io[6], (int)cospi_12_64, (int)cospi_20_64,
-                          &step1[5], &step1[6]);
+  highbd_butterfly_sse4_1(io[2], io[14], cospi_28_64, cospi_4_64, &step1[4],
+                          &step1[7]);
+  highbd_butterfly_sse4_1(io[10], io[6], cospi_12_64, cospi_20_64, &step1[5],
+                          &step1[6]);
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
   step1[9] = _mm_sub_epi32(step2[8], step2[9]);
   step1[10] = _mm_sub_epi32(step2[11], step2[10]);
@@ -82,12 +82,12 @@
 
   // stage 4
   highbd_butterfly_cospi16_sse4_1(io[0], io[8], &step2[0], &step2[1]);
-  highbd_butterfly_sse4_1(io[4], io[12], (int)cospi_24_64, (int)cospi_8_64,
-                          &step2[2], &step2[3]);
-  highbd_butterfly_sse4_1(step1[14], step1[9], (int)cospi_24_64,
-                          (int)cospi_8_64, &step2[9], &step2[14]);
-  highbd_butterfly_sse4_1(step1[10], step1[13], -(int)cospi_8_64,
-                          -(int)cospi_24_64, &step2[13], &step2[10]);
+  highbd_butterfly_sse4_1(io[4], io[12], cospi_24_64, cospi_8_64, &step2[2],
+                          &step2[3]);
+  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,
+                          &step2[9], &step2[14]);
+  highbd_butterfly_sse4_1(step1[10], step1[13], -cospi_8_64, -cospi_24_64,
+                          &step2[13], &step2[10]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step1[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -107,20 +107,20 @@
   __m128i temp1[2];
 
   // stage 2
-  highbd_partial_butterfly_sse4_1(io[1], (int)cospi_30_64, (int)cospi_2_64,
-                                  &step2[8], &step2[15]);
-  highbd_partial_butterfly_sse4_1(io[7], -(int)cospi_18_64, (int)cospi_14_64,
-                                  &step2[9], &step2[14]);
-  highbd_partial_butterfly_sse4_1(io[5], (int)cospi_22_64, (int)cospi_10_64,
-                                  &step2[10], &step2[13]);
-  highbd_partial_butterfly_sse4_1(io[3], -(int)cospi_26_64, (int)cospi_6_64,
-                                  &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse4_1(io[1], cospi_30_64, cospi_2_64, &step2[8],
+                                  &step2[15]);
+  highbd_partial_butterfly_sse4_1(io[7], -cospi_18_64, cospi_14_64, &step2[9],
+                                  &step2[14]);
+  highbd_partial_butterfly_sse4_1(io[5], cospi_22_64, cospi_10_64, &step2[10],
+                                  &step2[13]);
+  highbd_partial_butterfly_sse4_1(io[3], -cospi_26_64, cospi_6_64, &step2[11],
+                                  &step2[12]);
 
   // stage 3
-  highbd_partial_butterfly_sse4_1(io[2], (int)cospi_28_64, (int)cospi_4_64,
-                                  &step1[4], &step1[7]);
-  highbd_partial_butterfly_sse4_1(io[6], -(int)cospi_20_64, (int)cospi_12_64,
-                                  &step1[5], &step1[6]);
+  highbd_partial_butterfly_sse4_1(io[2], cospi_28_64, cospi_4_64, &step1[4],
+                                  &step1[7]);
+  highbd_partial_butterfly_sse4_1(io[6], -cospi_20_64, cospi_12_64, &step1[5],
+                                  &step1[6]);
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
   step1[9] = _mm_sub_epi32(step2[8], step2[9]);
   step1[10] = _mm_sub_epi32(step2[11], step2[10]);
@@ -132,14 +132,14 @@
 
   // stage 4
   extend_64bit(io[0], temp1);
-  step2[0] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
+  step2[0] = multiplication_round_shift_sse4_1(temp1, cospi_16_64);
   step2[1] = step2[0];
-  highbd_partial_butterfly_sse4_1(io[4], (int)cospi_24_64, (int)cospi_8_64,
-                                  &step2[2], &step2[3]);
-  highbd_butterfly_sse4_1(step1[14], step1[9], (int)cospi_24_64,
-                          (int)cospi_8_64, &step2[9], &step2[14]);
-  highbd_butterfly_sse4_1(step1[10], step1[13], -(int)cospi_8_64,
-                          -(int)cospi_24_64, &step2[13], &step2[10]);
+  highbd_partial_butterfly_sse4_1(io[4], cospi_24_64, cospi_8_64, &step2[2],
+                                  &step2[3]);
+  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,
+                          &step2[9], &step2[14]);
+  highbd_butterfly_sse4_1(step1[10], step1[13], -cospi_8_64, -cospi_24_64,
+                          &step2[13], &step2[10]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step1[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -159,14 +159,14 @@
   __m128i temp[2];
 
   // stage 2
-  highbd_partial_butterfly_sse4_1(io[1], (int)cospi_30_64, (int)cospi_2_64,
-                                  &step2[8], &step2[15]);
-  highbd_partial_butterfly_sse4_1(io[3], -(int)cospi_26_64, (int)cospi_6_64,
-                                  &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse4_1(io[1], cospi_30_64, cospi_2_64, &step2[8],
+                                  &step2[15]);
+  highbd_partial_butterfly_sse4_1(io[3], -cospi_26_64, cospi_6_64, &step2[11],
+                                  &step2[12]);
 
   // stage 3
-  highbd_partial_butterfly_sse4_1(io[2], (int)cospi_28_64, (int)cospi_4_64,
-                                  &step1[4], &step1[7]);
+  highbd_partial_butterfly_sse4_1(io[2], cospi_28_64, cospi_4_64, &step1[4],
+                                  &step1[7]);
   step1[8] = step2[8];
   step1[9] = step2[8];
   step1[10] = step2[11];
@@ -178,14 +178,14 @@
 
   // stage 4
   extend_64bit(io[0], temp);
-  step2[0] = multiplication_round_shift_sse4_1(temp, (int)cospi_16_64);
+  step2[0] = multiplication_round_shift_sse4_1(temp, cospi_16_64);
   step2[1] = step2[0];
   step2[2] = _mm_setzero_si128();
   step2[3] = _mm_setzero_si128();
-  highbd_butterfly_sse4_1(step1[14], step1[9], (int)cospi_24_64,
-                          (int)cospi_8_64, &step2[9], &step2[14]);
-  highbd_butterfly_sse4_1(step1[10], step1[13], -(int)cospi_8_64,
-                          -(int)cospi_24_64, &step2[13], &step2[10]);
+  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,
+                          &step2[9], &step2[14]);
+  highbd_butterfly_sse4_1(step1[10], step1[13], -cospi_8_64, -cospi_24_64,
+                          &step2[13], &step2[10]);
   step2[5] = step1[4];
   step2[6] = step1[7];
   step2[8] = step1[8];
--- a/vpx_dsp/x86/highbd_idct32x32_add_sse2.c
+++ b/vpx_dsp/x86/highbd_idct32x32_add_sse2.c
@@ -21,9 +21,9 @@
   // stage 4
   step2[8] = step1[8];
   step2[15] = step1[15];
-  highbd_butterfly_sse2(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[9], &step2[14]);
-  highbd_butterfly_sse2(step1[10], step1[13], (int)cospi_8_64, (int)cospi_24_64,
+  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
+                        &step2[14]);
+  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,
                         &step2[13], &step2[10]);
   step2[11] = step1[11];
   step2[12] = step1[12];
@@ -41,10 +41,10 @@
   // stage 6
   out[8] = step1[8];
   out[9] = step1[9];
-  highbd_butterfly_sse2(step1[13], step1[10], (int)cospi_16_64,
-                        (int)cospi_16_64, &out[10], &out[13]);
-  highbd_butterfly_sse2(step1[12], step1[11], (int)cospi_16_64,
-                        (int)cospi_16_64, &out[11], &out[12]);
+  highbd_butterfly_sse2(step1[13], step1[10], cospi_16_64, cospi_16_64,
+                        &out[10], &out[13]);
+  highbd_butterfly_sse2(step1[12], step1[11], cospi_16_64, cospi_16_64,
+                        &out[11], &out[12]);
   out[14] = step1[14];
   out[15] = step1[15];
 }
@@ -75,13 +75,13 @@
   // stage 5
   step1[16] = step2[16];
   step1[17] = step2[17];
-  highbd_butterfly_sse2(step2[29], step2[18], (int)cospi_24_64, (int)cospi_8_64,
+  highbd_butterfly_sse2(step2[29], step2[18], cospi_24_64, cospi_8_64,
                         &step1[18], &step1[29]);
-  highbd_butterfly_sse2(step2[28], step2[19], (int)cospi_24_64, (int)cospi_8_64,
+  highbd_butterfly_sse2(step2[28], step2[19], cospi_24_64, cospi_8_64,
                         &step1[19], &step1[28]);
-  highbd_butterfly_sse2(step2[20], step2[27], (int)cospi_8_64, (int)cospi_24_64,
+  highbd_butterfly_sse2(step2[20], step2[27], cospi_8_64, cospi_24_64,
                         &step1[27], &step1[20]);
-  highbd_butterfly_sse2(step2[21], step2[26], (int)cospi_8_64, (int)cospi_24_64,
+  highbd_butterfly_sse2(step2[21], step2[26], cospi_8_64, cospi_24_64,
                         &step1[26], &step1[21]);
   step1[22] = step2[22];
   step1[23] = step2[23];
@@ -114,14 +114,14 @@
   out[17] = step2[17];
   out[18] = step2[18];
   out[19] = step2[19];
-  highbd_butterfly_sse2(step2[27], step2[20], (int)cospi_16_64,
-                        (int)cospi_16_64, &out[20], &out[27]);
-  highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_16_64,
-                        (int)cospi_16_64, &out[21], &out[26]);
-  highbd_butterfly_sse2(step2[25], step2[22], (int)cospi_16_64,
-                        (int)cospi_16_64, &out[22], &out[25]);
-  highbd_butterfly_sse2(step2[24], step2[23], (int)cospi_16_64,
-                        (int)cospi_16_64, &out[23], &out[24]);
+  highbd_butterfly_sse2(step2[27], step2[20], cospi_16_64, cospi_16_64,
+                        &out[20], &out[27]);
+  highbd_butterfly_sse2(step2[26], step2[21], cospi_16_64, cospi_16_64,
+                        &out[21], &out[26]);
+  highbd_butterfly_sse2(step2[25], step2[22], cospi_16_64, cospi_16_64,
+                        &out[22], &out[25]);
+  highbd_butterfly_sse2(step2[24], step2[23], cospi_16_64, cospi_16_64,
+                        &out[23], &out[24]);
   out[28] = step2[28];
   out[29] = step2[29];
   out[30] = step2[30];
@@ -142,16 +142,16 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  highbd_butterfly_sse2(in[4], in[28], (int)cospi_28_64, (int)cospi_4_64,
-                        &step1[4], &step1[7]);
-  highbd_butterfly_sse2(in[20], in[12], (int)cospi_12_64, (int)cospi_20_64,
-                        &step1[5], &step1[6]);
+  highbd_butterfly_sse2(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4],
+                        &step1[7]);
+  highbd_butterfly_sse2(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5],
+                        &step1[6]);
 
   // stage 4
-  highbd_butterfly_sse2(in[0], in[16], (int)cospi_16_64, (int)cospi_16_64,
-                        &step2[1], &step2[0]);
-  highbd_butterfly_sse2(in[8], in[24], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[2], &step2[3]);
+  highbd_butterfly_sse2(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1],
+                        &step2[0]);
+  highbd_butterfly_sse2(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2],
+                        &step2[3]);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -163,8 +163,8 @@
   step1[2] = _mm_sub_epi32(step2[1], step2[2]);
   step1[3] = _mm_sub_epi32(step2[0], step2[3]);
   step1[4] = step2[4];
-  highbd_butterfly_sse2(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
-                        &step1[5], &step1[6]);
+  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
+                        &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -186,14 +186,14 @@
   __m128i step1[32], step2[32];
 
   // stage 2
-  highbd_butterfly_sse2(in[2], in[30], (int)cospi_30_64, (int)cospi_2_64,
-                        &step2[8], &step2[15]);
-  highbd_butterfly_sse2(in[18], in[14], (int)cospi_14_64, (int)cospi_18_64,
-                        &step2[9], &step2[14]);
-  highbd_butterfly_sse2(in[10], in[22], (int)cospi_22_64, (int)cospi_10_64,
-                        &step2[10], &step2[13]);
-  highbd_butterfly_sse2(in[26], in[6], (int)cospi_6_64, (int)cospi_26_64,
-                        &step2[11], &step2[12]);
+  highbd_butterfly_sse2(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8],
+                        &step2[15]);
+  highbd_butterfly_sse2(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9],
+                        &step2[14]);
+  highbd_butterfly_sse2(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10],
+                        &step2[13]);
+  highbd_butterfly_sse2(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11],
+                        &step2[12]);
 
   // stage 3
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
@@ -226,24 +226,24 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  highbd_butterfly_sse2(in[1], in[31], (int)cospi_31_64, (int)cospi_1_64,
-                        &step1[16], &step1[31]);
-  highbd_butterfly_sse2(in[17], in[15], (int)cospi_15_64, (int)cospi_17_64,
-                        &step1[17], &step1[30]);
-  highbd_butterfly_sse2(in[9], in[23], (int)cospi_23_64, (int)cospi_9_64,
-                        &step1[18], &step1[29]);
-  highbd_butterfly_sse2(in[25], in[7], (int)cospi_7_64, (int)cospi_25_64,
-                        &step1[19], &step1[28]);
+  highbd_butterfly_sse2(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16],
+                        &step1[31]);
+  highbd_butterfly_sse2(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17],
+                        &step1[30]);
+  highbd_butterfly_sse2(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18],
+                        &step1[29]);
+  highbd_butterfly_sse2(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19],
+                        &step1[28]);
 
-  highbd_butterfly_sse2(in[5], in[27], (int)cospi_27_64, (int)cospi_5_64,
-                        &step1[20], &step1[27]);
-  highbd_butterfly_sse2(in[21], in[11], (int)cospi_11_64, (int)cospi_21_64,
-                        &step1[21], &step1[26]);
+  highbd_butterfly_sse2(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20],
+                        &step1[27]);
+  highbd_butterfly_sse2(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21],
+                        &step1[26]);
 
-  highbd_butterfly_sse2(in[13], in[19], (int)cospi_19_64, (int)cospi_13_64,
-                        &step1[22], &step1[25]);
-  highbd_butterfly_sse2(in[29], in[3], (int)cospi_3_64, (int)cospi_29_64,
-                        &step1[23], &step1[24]);
+  highbd_butterfly_sse2(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22],
+                        &step1[25]);
+  highbd_butterfly_sse2(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23],
+                        &step1[24]);
 
   // stage 2
   step2[16] = _mm_add_epi32(step1[16], step1[17]);
@@ -267,16 +267,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  highbd_butterfly_sse2(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64,
+  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,
                         &step1[17], &step1[30]);
-  highbd_butterfly_sse2(step2[18], step2[29], (int)cospi_4_64, (int)cospi_28_64,
+  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,
                         &step1[29], &step1[18]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_12_64,
-                        (int)cospi_20_64, &step1[21], &step1[26]);
-  highbd_butterfly_sse2(step2[22], step2[25], (int)cospi_20_64,
-                        (int)cospi_12_64, &step1[25], &step1[22]);
+  highbd_butterfly_sse2(step2[26], step2[21], cospi_12_64, cospi_20_64,
+                        &step1[21], &step1[26]);
+  highbd_butterfly_sse2(step2[22], step2[25], cospi_20_64, cospi_12_64,
+                        &step1[25], &step1[22]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
@@ -366,16 +366,16 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  highbd_partial_butterfly_sse2(in[4], (int)cospi_28_64, (int)cospi_4_64,
-                                &step1[4], &step1[7]);
-  highbd_partial_butterfly_neg_sse2(in[12], (int)cospi_12_64, (int)cospi_20_64,
-                                    &step1[5], &step1[6]);
+  highbd_partial_butterfly_sse2(in[4], cospi_28_64, cospi_4_64, &step1[4],
+                                &step1[7]);
+  highbd_partial_butterfly_neg_sse2(in[12], cospi_12_64, cospi_20_64, &step1[5],
+                                    &step1[6]);
 
   // stage 4
-  highbd_partial_butterfly_sse2(in[0], (int)cospi_16_64, (int)cospi_16_64,
-                                &step2[1], &step2[0]);
-  highbd_partial_butterfly_sse2(in[8], (int)cospi_24_64, (int)cospi_8_64,
-                                &step2[2], &step2[3]);
+  highbd_partial_butterfly_sse2(in[0], cospi_16_64, cospi_16_64, &step2[1],
+                                &step2[0]);
+  highbd_partial_butterfly_sse2(in[8], cospi_24_64, cospi_8_64, &step2[2],
+                                &step2[3]);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -387,8 +387,8 @@
   step1[2] = _mm_sub_epi32(step2[1], step2[2]);
   step1[3] = _mm_sub_epi32(step2[0], step2[3]);
   step1[4] = step2[4];
-  highbd_butterfly_sse2(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
-                        &step1[5], &step1[6]);
+  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
+                        &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -410,14 +410,14 @@
   __m128i step1[32], step2[32];
 
   // stage 2
-  highbd_partial_butterfly_sse2(in[2], (int)cospi_30_64, (int)cospi_2_64,
-                                &step2[8], &step2[15]);
-  highbd_partial_butterfly_neg_sse2(in[14], (int)cospi_14_64, (int)cospi_18_64,
-                                    &step2[9], &step2[14]);
-  highbd_partial_butterfly_sse2(in[10], (int)cospi_22_64, (int)cospi_10_64,
-                                &step2[10], &step2[13]);
-  highbd_partial_butterfly_neg_sse2(in[6], (int)cospi_6_64, (int)cospi_26_64,
-                                    &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse2(in[2], cospi_30_64, cospi_2_64, &step2[8],
+                                &step2[15]);
+  highbd_partial_butterfly_neg_sse2(in[14], cospi_14_64, cospi_18_64, &step2[9],
+                                    &step2[14]);
+  highbd_partial_butterfly_sse2(in[10], cospi_22_64, cospi_10_64, &step2[10],
+                                &step2[13]);
+  highbd_partial_butterfly_neg_sse2(in[6], cospi_6_64, cospi_26_64, &step2[11],
+                                    &step2[12]);
 
   // stage 3
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
@@ -450,24 +450,24 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  highbd_partial_butterfly_sse2(in[1], (int)cospi_31_64, (int)cospi_1_64,
-                                &step1[16], &step1[31]);
-  highbd_partial_butterfly_neg_sse2(in[15], (int)cospi_15_64, (int)cospi_17_64,
+  highbd_partial_butterfly_sse2(in[1], cospi_31_64, cospi_1_64, &step1[16],
+                                &step1[31]);
+  highbd_partial_butterfly_neg_sse2(in[15], cospi_15_64, cospi_17_64,
                                     &step1[17], &step1[30]);
-  highbd_partial_butterfly_sse2(in[9], (int)cospi_23_64, (int)cospi_9_64,
-                                &step1[18], &step1[29]);
-  highbd_partial_butterfly_neg_sse2(in[7], (int)cospi_7_64, (int)cospi_25_64,
-                                    &step1[19], &step1[28]);
+  highbd_partial_butterfly_sse2(in[9], cospi_23_64, cospi_9_64, &step1[18],
+                                &step1[29]);
+  highbd_partial_butterfly_neg_sse2(in[7], cospi_7_64, cospi_25_64, &step1[19],
+                                    &step1[28]);
 
-  highbd_partial_butterfly_sse2(in[5], (int)cospi_27_64, (int)cospi_5_64,
-                                &step1[20], &step1[27]);
-  highbd_partial_butterfly_neg_sse2(in[11], (int)cospi_11_64, (int)cospi_21_64,
+  highbd_partial_butterfly_sse2(in[5], cospi_27_64, cospi_5_64, &step1[20],
+                                &step1[27]);
+  highbd_partial_butterfly_neg_sse2(in[11], cospi_11_64, cospi_21_64,
                                     &step1[21], &step1[26]);
 
-  highbd_partial_butterfly_sse2(in[13], (int)cospi_19_64, (int)cospi_13_64,
-                                &step1[22], &step1[25]);
-  highbd_partial_butterfly_neg_sse2(in[3], (int)cospi_3_64, (int)cospi_29_64,
-                                    &step1[23], &step1[24]);
+  highbd_partial_butterfly_sse2(in[13], cospi_19_64, cospi_13_64, &step1[22],
+                                &step1[25]);
+  highbd_partial_butterfly_neg_sse2(in[3], cospi_3_64, cospi_29_64, &step1[23],
+                                    &step1[24]);
 
   // stage 2
   step2[16] = _mm_add_epi32(step1[16], step1[17]);
@@ -491,16 +491,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  highbd_butterfly_sse2(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64,
+  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,
                         &step1[17], &step1[30]);
-  highbd_butterfly_sse2(step2[18], step2[29], (int)cospi_4_64, (int)cospi_28_64,
+  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,
                         &step1[29], &step1[18]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_12_64,
-                        (int)cospi_20_64, &step1[21], &step1[26]);
-  highbd_butterfly_sse2(step2[22], step2[25], (int)cospi_20_64,
-                        (int)cospi_12_64, &step1[25], &step1[22]);
+  highbd_butterfly_sse2(step2[26], step2[21], cospi_12_64, cospi_20_64,
+                        &step1[21], &step1[26]);
+  highbd_butterfly_sse2(step2[22], step2[25], cospi_20_64, cospi_12_64,
+                        &step1[25], &step1[22]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
@@ -583,12 +583,12 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  highbd_partial_butterfly_sse2(in[4], (int)cospi_28_64, (int)cospi_4_64,
-                                &step1[4], &step1[7]);
+  highbd_partial_butterfly_sse2(in[4], cospi_28_64, cospi_4_64, &step1[4],
+                                &step1[7]);
 
   // stage 4
-  highbd_partial_butterfly_sse2(in[0], (int)cospi_16_64, (int)cospi_16_64,
-                                &step2[1], &step2[0]);
+  highbd_partial_butterfly_sse2(in[0], cospi_16_64, cospi_16_64, &step2[1],
+                                &step2[0]);
   step2[4] = step1[4];
   step2[5] = step1[4];
   step2[6] = step1[7];
@@ -600,8 +600,8 @@
   step1[2] = step2[1];
   step1[3] = step2[0];
   step1[4] = step2[4];
-  highbd_butterfly_sse2(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64,
-                        &step1[5], &step1[6]);
+  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],
+                        &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -623,10 +623,10 @@
   __m128i step1[32], step2[32];
 
   // stage 2
-  highbd_partial_butterfly_sse2(in[2], (int)cospi_30_64, (int)cospi_2_64,
-                                &step2[8], &step2[15]);
-  highbd_partial_butterfly_neg_sse2(in[6], (int)cospi_6_64, (int)cospi_26_64,
-                                    &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse2(in[2], cospi_30_64, cospi_2_64, &step2[8],
+                                &step2[15]);
+  highbd_partial_butterfly_neg_sse2(in[6], cospi_6_64, cospi_26_64, &step2[11],
+                                    &step2[12]);
 
   // stage 3
   step1[8] = step2[8];
@@ -663,15 +663,15 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  highbd_partial_butterfly_sse2(in[1], (int)cospi_31_64, (int)cospi_1_64,
-                                &step1[16], &step1[31]);
-  highbd_partial_butterfly_neg_sse2(in[7], (int)cospi_7_64, (int)cospi_25_64,
-                                    &step1[19], &step1[28]);
+  highbd_partial_butterfly_sse2(in[1], cospi_31_64, cospi_1_64, &step1[16],
+                                &step1[31]);
+  highbd_partial_butterfly_neg_sse2(in[7], cospi_7_64, cospi_25_64, &step1[19],
+                                    &step1[28]);
 
-  highbd_partial_butterfly_sse2(in[5], (int)cospi_27_64, (int)cospi_5_64,
-                                &step1[20], &step1[27]);
-  highbd_partial_butterfly_neg_sse2(in[3], (int)cospi_3_64, (int)cospi_29_64,
-                                    &step1[23], &step1[24]);
+  highbd_partial_butterfly_sse2(in[5], cospi_27_64, cospi_5_64, &step1[20],
+                                &step1[27]);
+  highbd_partial_butterfly_neg_sse2(in[3], cospi_3_64, cospi_29_64, &step1[23],
+                                    &step1[24]);
 
   // stage 2
   step2[16] = step1[16];
@@ -703,16 +703,16 @@
       _mm_sub_epi32(_mm_setzero_si128(), step2[29]);  // step2[29] = -step2[29]
   step1[16] = step2[16];
   step1[31] = step2[31];
-  highbd_butterfly_sse2(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64,
+  highbd_butterfly_sse2(step2[30], step2[17], cospi_28_64, cospi_4_64,
                         &step1[17], &step1[30]);
-  highbd_butterfly_sse2(step2[18], step2[29], (int)cospi_4_64, (int)cospi_28_64,
+  highbd_butterfly_sse2(step2[18], step2[29], cospi_4_64, cospi_28_64,
                         &step1[29], &step1[18]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  highbd_butterfly_sse2(step2[26], step2[21], (int)cospi_12_64,
-                        (int)cospi_20_64, &step1[21], &step1[26]);
-  highbd_butterfly_sse2(step2[22], step2[25], (int)cospi_20_64,
-                        (int)cospi_12_64, &step1[25], &step1[22]);
+  highbd_butterfly_sse2(step2[26], step2[21], cospi_12_64, cospi_20_64,
+                        &step1[21], &step1[26]);
+  highbd_butterfly_sse2(step2[22], step2[25], cospi_20_64, cospi_12_64,
+                        &step1[25], &step1[22]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
--- a/vpx_dsp/x86/highbd_idct32x32_add_sse4.c
+++ b/vpx_dsp/x86/highbd_idct32x32_add_sse4.c
@@ -25,10 +25,10 @@
   // stage 4
   step2[8] = step1[8];
   step2[15] = step1[15];
-  highbd_butterfly_sse4_1(step1[14], step1[9], (int)cospi_24_64,
-                          (int)cospi_8_64, &step2[9], &step2[14]);
-  highbd_butterfly_sse4_1(step1[13], step1[10], -(int)cospi_8_64,
-                          (int)cospi_24_64, &step2[10], &step2[13]);
+  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,
+                          &step2[9], &step2[14]);
+  highbd_butterfly_sse4_1(step1[13], step1[10], -cospi_8_64, cospi_24_64,
+                          &step2[10], &step2[13]);
   step2[11] = step1[11];
   step2[12] = step1[12];
 
@@ -45,10 +45,10 @@
   // stage 6
   out[8] = step1[8];
   out[9] = step1[9];
-  highbd_butterfly_sse4_1(step1[13], step1[10], (int)cospi_16_64,
-                          (int)cospi_16_64, &out[10], &out[13]);
-  highbd_butterfly_sse4_1(step1[12], step1[11], (int)cospi_16_64,
-                          (int)cospi_16_64, &out[11], &out[12]);
+  highbd_butterfly_sse4_1(step1[13], step1[10], cospi_16_64, cospi_16_64,
+                          &out[10], &out[13]);
+  highbd_butterfly_sse4_1(step1[12], step1[11], cospi_16_64, cospi_16_64,
+                          &out[11], &out[12]);
   out[14] = step1[14];
   out[15] = step1[15];
 }
@@ -79,14 +79,14 @@
   // stage 5
   step1[16] = step2[16];
   step1[17] = step2[17];
-  highbd_butterfly_sse4_1(step2[29], step2[18], (int)cospi_24_64,
-                          (int)cospi_8_64, &step1[18], &step1[29]);
-  highbd_butterfly_sse4_1(step2[28], step2[19], (int)cospi_24_64,
-                          (int)cospi_8_64, &step1[19], &step1[28]);
-  highbd_butterfly_sse4_1(step2[27], step2[20], -(int)cospi_8_64,
-                          (int)cospi_24_64, &step1[20], &step1[27]);
-  highbd_butterfly_sse4_1(step2[26], step2[21], -(int)cospi_8_64,
-                          (int)cospi_24_64, &step1[21], &step1[26]);
+  highbd_butterfly_sse4_1(step2[29], step2[18], cospi_24_64, cospi_8_64,
+                          &step1[18], &step1[29]);
+  highbd_butterfly_sse4_1(step2[28], step2[19], cospi_24_64, cospi_8_64,
+                          &step1[19], &step1[28]);
+  highbd_butterfly_sse4_1(step2[27], step2[20], -cospi_8_64, cospi_24_64,
+                          &step1[20], &step1[27]);
+  highbd_butterfly_sse4_1(step2[26], step2[21], -cospi_8_64, cospi_24_64,
+                          &step1[21], &step1[26]);
   step1[22] = step2[22];
   step1[23] = step2[23];
   step1[24] = step2[24];
@@ -118,14 +118,14 @@
   out[17] = step2[17];
   out[18] = step2[18];
   out[19] = step2[19];
-  highbd_butterfly_sse4_1(step2[27], step2[20], (int)cospi_16_64,
-                          (int)cospi_16_64, &out[20], &out[27]);
-  highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_16_64,
-                          (int)cospi_16_64, &out[21], &out[26]);
-  highbd_butterfly_sse4_1(step2[25], step2[22], (int)cospi_16_64,
-                          (int)cospi_16_64, &out[22], &out[25]);
-  highbd_butterfly_sse4_1(step2[24], step2[23], (int)cospi_16_64,
-                          (int)cospi_16_64, &out[23], &out[24]);
+  highbd_butterfly_sse4_1(step2[27], step2[20], cospi_16_64, cospi_16_64,
+                          &out[20], &out[27]);
+  highbd_butterfly_sse4_1(step2[26], step2[21], cospi_16_64, cospi_16_64,
+                          &out[21], &out[26]);
+  highbd_butterfly_sse4_1(step2[25], step2[22], cospi_16_64, cospi_16_64,
+                          &out[22], &out[25]);
+  highbd_butterfly_sse4_1(step2[24], step2[23], cospi_16_64, cospi_16_64,
+                          &out[23], &out[24]);
   out[28] = step2[28];
   out[29] = step2[29];
   out[30] = step2[30];
@@ -146,16 +146,16 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  highbd_butterfly_sse4_1(in[4], in[28], (int)cospi_28_64, (int)cospi_4_64,
-                          &step1[4], &step1[7]);
-  highbd_butterfly_sse4_1(in[20], in[12], (int)cospi_12_64, (int)cospi_20_64,
-                          &step1[5], &step1[6]);
+  highbd_butterfly_sse4_1(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4],
+                          &step1[7]);
+  highbd_butterfly_sse4_1(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5],
+                          &step1[6]);
 
   // stage 4
-  highbd_butterfly_sse4_1(in[0], in[16], (int)cospi_16_64, (int)cospi_16_64,
-                          &step2[1], &step2[0]);
-  highbd_butterfly_sse4_1(in[8], in[24], (int)cospi_24_64, (int)cospi_8_64,
-                          &step2[2], &step2[3]);
+  highbd_butterfly_sse4_1(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1],
+                          &step2[0]);
+  highbd_butterfly_sse4_1(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2],
+                          &step2[3]);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -167,8 +167,8 @@
   step1[2] = _mm_sub_epi32(step2[1], step2[2]);
   step1[3] = _mm_sub_epi32(step2[0], step2[3]);
   step1[4] = step2[4];
-  highbd_butterfly_sse4_1(step2[6], step2[5], (int)cospi_16_64,
-                          (int)cospi_16_64, &step1[5], &step1[6]);
+  highbd_butterfly_sse4_1(step2[6], step2[5], cospi_16_64, cospi_16_64,
+                          &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -190,14 +190,14 @@
   __m128i step1[32], step2[32];
 
   // stage 2
-  highbd_butterfly_sse4_1(in[2], in[30], (int)cospi_30_64, (int)cospi_2_64,
-                          &step2[8], &step2[15]);
-  highbd_butterfly_sse4_1(in[18], in[14], (int)cospi_14_64, (int)cospi_18_64,
-                          &step2[9], &step2[14]);
-  highbd_butterfly_sse4_1(in[10], in[22], (int)cospi_22_64, (int)cospi_10_64,
-                          &step2[10], &step2[13]);
-  highbd_butterfly_sse4_1(in[26], in[6], (int)cospi_6_64, (int)cospi_26_64,
-                          &step2[11], &step2[12]);
+  highbd_butterfly_sse4_1(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8],
+                          &step2[15]);
+  highbd_butterfly_sse4_1(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9],
+                          &step2[14]);
+  highbd_butterfly_sse4_1(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10],
+                          &step2[13]);
+  highbd_butterfly_sse4_1(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11],
+                          &step2[12]);
 
   // stage 3
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
@@ -230,24 +230,24 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  highbd_butterfly_sse4_1(in[1], in[31], (int)cospi_31_64, (int)cospi_1_64,
-                          &step1[16], &step1[31]);
-  highbd_butterfly_sse4_1(in[17], in[15], (int)cospi_15_64, (int)cospi_17_64,
-                          &step1[17], &step1[30]);
-  highbd_butterfly_sse4_1(in[9], in[23], (int)cospi_23_64, (int)cospi_9_64,
-                          &step1[18], &step1[29]);
-  highbd_butterfly_sse4_1(in[25], in[7], (int)cospi_7_64, (int)cospi_25_64,
-                          &step1[19], &step1[28]);
+  highbd_butterfly_sse4_1(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16],
+                          &step1[31]);
+  highbd_butterfly_sse4_1(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17],
+                          &step1[30]);
+  highbd_butterfly_sse4_1(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18],
+                          &step1[29]);
+  highbd_butterfly_sse4_1(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19],
+                          &step1[28]);
 
-  highbd_butterfly_sse4_1(in[5], in[27], (int)cospi_27_64, (int)cospi_5_64,
-                          &step1[20], &step1[27]);
-  highbd_butterfly_sse4_1(in[21], in[11], (int)cospi_11_64, (int)cospi_21_64,
-                          &step1[21], &step1[26]);
+  highbd_butterfly_sse4_1(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20],
+                          &step1[27]);
+  highbd_butterfly_sse4_1(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21],
+                          &step1[26]);
 
-  highbd_butterfly_sse4_1(in[13], in[19], (int)cospi_19_64, (int)cospi_13_64,
-                          &step1[22], &step1[25]);
-  highbd_butterfly_sse4_1(in[29], in[3], (int)cospi_3_64, (int)cospi_29_64,
-                          &step1[23], &step1[24]);
+  highbd_butterfly_sse4_1(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22],
+                          &step1[25]);
+  highbd_butterfly_sse4_1(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23],
+                          &step1[24]);
 
   // stage 2
   step2[16] = _mm_add_epi32(step1[16], step1[17]);
@@ -271,16 +271,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  highbd_butterfly_sse4_1(step2[30], step2[17], (int)cospi_28_64,
-                          (int)cospi_4_64, &step1[17], &step1[30]);
-  highbd_butterfly_sse4_1(step2[29], step2[18], -(int)cospi_4_64,
-                          (int)cospi_28_64, &step1[18], &step1[29]);
+  highbd_butterfly_sse4_1(step2[30], step2[17], cospi_28_64, cospi_4_64,
+                          &step1[17], &step1[30]);
+  highbd_butterfly_sse4_1(step2[29], step2[18], -cospi_4_64, cospi_28_64,
+                          &step1[18], &step1[29]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_12_64,
-                          (int)cospi_20_64, &step1[21], &step1[26]);
-  highbd_butterfly_sse4_1(step2[25], step2[22], -(int)cospi_20_64,
-                          (int)cospi_12_64, &step1[22], &step1[25]);
+  highbd_butterfly_sse4_1(step2[26], step2[21], cospi_12_64, cospi_20_64,
+                          &step1[21], &step1[26]);
+  highbd_butterfly_sse4_1(step2[25], step2[22], -cospi_20_64, cospi_12_64,
+                          &step1[22], &step1[25]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
@@ -370,16 +370,16 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  highbd_partial_butterfly_sse4_1(in[4], (int)cospi_28_64, (int)cospi_4_64,
-                                  &step1[4], &step1[7]);
-  highbd_partial_butterfly_sse4_1(in[12], -(int)cospi_20_64, (int)cospi_12_64,
-                                  &step1[5], &step1[6]);
+  highbd_partial_butterfly_sse4_1(in[4], cospi_28_64, cospi_4_64, &step1[4],
+                                  &step1[7]);
+  highbd_partial_butterfly_sse4_1(in[12], -cospi_20_64, cospi_12_64, &step1[5],
+                                  &step1[6]);
 
   // stage 4
-  highbd_partial_butterfly_sse4_1(in[0], (int)cospi_16_64, (int)cospi_16_64,
-                                  &step2[1], &step2[0]);
-  highbd_partial_butterfly_sse4_1(in[8], (int)cospi_24_64, (int)cospi_8_64,
-                                  &step2[2], &step2[3]);
+  highbd_partial_butterfly_sse4_1(in[0], cospi_16_64, cospi_16_64, &step2[1],
+                                  &step2[0]);
+  highbd_partial_butterfly_sse4_1(in[8], cospi_24_64, cospi_8_64, &step2[2],
+                                  &step2[3]);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -391,8 +391,8 @@
   step1[2] = _mm_sub_epi32(step2[1], step2[2]);
   step1[3] = _mm_sub_epi32(step2[0], step2[3]);
   step1[4] = step2[4];
-  highbd_butterfly_sse4_1(step2[6], step2[5], (int)cospi_16_64,
-                          (int)cospi_16_64, &step1[5], &step1[6]);
+  highbd_butterfly_sse4_1(step2[6], step2[5], cospi_16_64, cospi_16_64,
+                          &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -414,14 +414,14 @@
   __m128i step1[32], step2[32];
 
   // stage 2
-  highbd_partial_butterfly_sse4_1(in[2], (int)cospi_30_64, (int)cospi_2_64,
-                                  &step2[8], &step2[15]);
-  highbd_partial_butterfly_sse4_1(in[14], -(int)cospi_18_64, (int)cospi_14_64,
-                                  &step2[9], &step2[14]);
-  highbd_partial_butterfly_sse4_1(in[10], (int)cospi_22_64, (int)cospi_10_64,
-                                  &step2[10], &step2[13]);
-  highbd_partial_butterfly_sse4_1(in[6], -(int)cospi_26_64, (int)cospi_6_64,
-                                  &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse4_1(in[2], cospi_30_64, cospi_2_64, &step2[8],
+                                  &step2[15]);
+  highbd_partial_butterfly_sse4_1(in[14], -cospi_18_64, cospi_14_64, &step2[9],
+                                  &step2[14]);
+  highbd_partial_butterfly_sse4_1(in[10], cospi_22_64, cospi_10_64, &step2[10],
+                                  &step2[13]);
+  highbd_partial_butterfly_sse4_1(in[6], -cospi_26_64, cospi_6_64, &step2[11],
+                                  &step2[12]);
 
   // stage 3
   step1[8] = _mm_add_epi32(step2[8], step2[9]);
@@ -454,24 +454,24 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  highbd_partial_butterfly_sse4_1(in[1], (int)cospi_31_64, (int)cospi_1_64,
-                                  &step1[16], &step1[31]);
-  highbd_partial_butterfly_sse4_1(in[15], -(int)cospi_17_64, (int)cospi_15_64,
-                                  &step1[17], &step1[30]);
-  highbd_partial_butterfly_sse4_1(in[9], (int)cospi_23_64, (int)cospi_9_64,
-                                  &step1[18], &step1[29]);
-  highbd_partial_butterfly_sse4_1(in[7], -(int)cospi_25_64, (int)cospi_7_64,
-                                  &step1[19], &step1[28]);
+  highbd_partial_butterfly_sse4_1(in[1], cospi_31_64, cospi_1_64, &step1[16],
+                                  &step1[31]);
+  highbd_partial_butterfly_sse4_1(in[15], -cospi_17_64, cospi_15_64, &step1[17],
+                                  &step1[30]);
+  highbd_partial_butterfly_sse4_1(in[9], cospi_23_64, cospi_9_64, &step1[18],
+                                  &step1[29]);
+  highbd_partial_butterfly_sse4_1(in[7], -cospi_25_64, cospi_7_64, &step1[19],
+                                  &step1[28]);
 
-  highbd_partial_butterfly_sse4_1(in[5], (int)cospi_27_64, (int)cospi_5_64,
-                                  &step1[20], &step1[27]);
-  highbd_partial_butterfly_sse4_1(in[11], -(int)cospi_21_64, (int)cospi_11_64,
-                                  &step1[21], &step1[26]);
+  highbd_partial_butterfly_sse4_1(in[5], cospi_27_64, cospi_5_64, &step1[20],
+                                  &step1[27]);
+  highbd_partial_butterfly_sse4_1(in[11], -cospi_21_64, cospi_11_64, &step1[21],
+                                  &step1[26]);
 
-  highbd_partial_butterfly_sse4_1(in[13], (int)cospi_19_64, (int)cospi_13_64,
-                                  &step1[22], &step1[25]);
-  highbd_partial_butterfly_sse4_1(in[3], -(int)cospi_29_64, (int)cospi_3_64,
-                                  &step1[23], &step1[24]);
+  highbd_partial_butterfly_sse4_1(in[13], cospi_19_64, cospi_13_64, &step1[22],
+                                  &step1[25]);
+  highbd_partial_butterfly_sse4_1(in[3], -cospi_29_64, cospi_3_64, &step1[23],
+                                  &step1[24]);
 
   // stage 2
   step2[16] = _mm_add_epi32(step1[16], step1[17]);
@@ -495,16 +495,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  highbd_butterfly_sse4_1(step2[30], step2[17], (int)cospi_28_64,
-                          (int)cospi_4_64, &step1[17], &step1[30]);
-  highbd_butterfly_sse4_1(step2[29], step2[18], -(int)cospi_4_64,
-                          (int)cospi_28_64, &step1[18], &step1[29]);
+  highbd_butterfly_sse4_1(step2[30], step2[17], cospi_28_64, cospi_4_64,
+                          &step1[17], &step1[30]);
+  highbd_butterfly_sse4_1(step2[29], step2[18], -cospi_4_64, cospi_28_64,
+                          &step1[18], &step1[29]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_12_64,
-                          (int)cospi_20_64, &step1[21], &step1[26]);
-  highbd_butterfly_sse4_1(step2[25], step2[22], -(int)cospi_20_64,
-                          (int)cospi_12_64, &step1[22], &step1[25]);
+  highbd_butterfly_sse4_1(step2[26], step2[21], cospi_12_64, cospi_20_64,
+                          &step1[21], &step1[26]);
+  highbd_butterfly_sse4_1(step2[25], step2[22], -cospi_20_64, cospi_12_64,
+                          &step1[22], &step1[25]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
@@ -583,12 +583,12 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  highbd_partial_butterfly_sse4_1(in[4], (int)cospi_28_64, (int)cospi_4_64,
-                                  &step1[4], &step1[7]);
+  highbd_partial_butterfly_sse4_1(in[4], cospi_28_64, cospi_4_64, &step1[4],
+                                  &step1[7]);
 
   // stage 4
-  highbd_partial_butterfly_sse4_1(in[0], (int)cospi_16_64, (int)cospi_16_64,
-                                  &step2[1], &step2[0]);
+  highbd_partial_butterfly_sse4_1(in[0], cospi_16_64, cospi_16_64, &step2[1],
+                                  &step2[0]);
   step2[4] = step1[4];
   step2[5] = step1[4];
   step2[6] = step1[7];
@@ -600,8 +600,8 @@
   step1[2] = step2[1];
   step1[3] = step2[0];
   step1[4] = step2[4];
-  highbd_butterfly_sse4_1(step2[6], step2[5], (int)cospi_16_64,
-                          (int)cospi_16_64, &step1[5], &step1[6]);
+  highbd_butterfly_sse4_1(step2[6], step2[5], cospi_16_64, cospi_16_64,
+                          &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -623,10 +623,10 @@
   __m128i step1[32], step2[32];
 
   // stage 2
-  highbd_partial_butterfly_sse4_1(in[2], (int)cospi_30_64, (int)cospi_2_64,
-                                  &step2[8], &step2[15]);
-  highbd_partial_butterfly_sse4_1(in[6], -(int)cospi_26_64, (int)cospi_6_64,
-                                  &step2[11], &step2[12]);
+  highbd_partial_butterfly_sse4_1(in[2], cospi_30_64, cospi_2_64, &step2[8],
+                                  &step2[15]);
+  highbd_partial_butterfly_sse4_1(in[6], -cospi_26_64, cospi_6_64, &step2[11],
+                                  &step2[12]);
 
   // stage 3
   step1[8] = step2[8];
@@ -659,15 +659,15 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  highbd_partial_butterfly_sse4_1(in[1], (int)cospi_31_64, (int)cospi_1_64,
-                                  &step1[16], &step1[31]);
-  highbd_partial_butterfly_sse4_1(in[7], -(int)cospi_25_64, (int)cospi_7_64,
-                                  &step1[19], &step1[28]);
+  highbd_partial_butterfly_sse4_1(in[1], cospi_31_64, cospi_1_64, &step1[16],
+                                  &step1[31]);
+  highbd_partial_butterfly_sse4_1(in[7], -cospi_25_64, cospi_7_64, &step1[19],
+                                  &step1[28]);
 
-  highbd_partial_butterfly_sse4_1(in[5], (int)cospi_27_64, (int)cospi_5_64,
-                                  &step1[20], &step1[27]);
-  highbd_partial_butterfly_sse4_1(in[3], -(int)cospi_29_64, (int)cospi_3_64,
-                                  &step1[23], &step1[24]);
+  highbd_partial_butterfly_sse4_1(in[5], cospi_27_64, cospi_5_64, &step1[20],
+                                  &step1[27]);
+  highbd_partial_butterfly_sse4_1(in[3], -cospi_29_64, cospi_3_64, &step1[23],
+                                  &step1[24]);
 
   // stage 2
   step2[16] = step1[16];
@@ -691,16 +691,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  highbd_butterfly_sse4_1(step2[30], step2[17], (int)cospi_28_64,
-                          (int)cospi_4_64, &step1[17], &step1[30]);
-  highbd_butterfly_sse4_1(step2[29], step2[18], -(int)cospi_4_64,
-                          (int)cospi_28_64, &step1[18], &step1[29]);
+  highbd_butterfly_sse4_1(step2[30], step2[17], cospi_28_64, cospi_4_64,
+                          &step1[17], &step1[30]);
+  highbd_butterfly_sse4_1(step2[29], step2[18], -cospi_4_64, cospi_28_64,
+                          &step1[18], &step1[29]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  highbd_butterfly_sse4_1(step2[26], step2[21], (int)cospi_12_64,
-                          (int)cospi_20_64, &step1[21], &step1[26]);
-  highbd_butterfly_sse4_1(step2[25], step2[22], -(int)cospi_20_64,
-                          (int)cospi_12_64, &step1[22], &step1[25]);
+  highbd_butterfly_sse4_1(step2[26], step2[21], cospi_12_64, cospi_20_64,
+                          &step1[21], &step1[26]);
+  highbd_butterfly_sse4_1(step2[25], step2[22], -cospi_20_64, cospi_12_64,
+                          &step1[22], &step1[25]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
--- a/vpx_dsp/x86/highbd_idct4x4_add_sse2.c
+++ b/vpx_dsp/x86/highbd_idct4x4_add_sse2.c
@@ -24,12 +24,9 @@
 }
 
 static INLINE void highbd_idct4_small_sse2(__m128i *const io) {
-  const __m128i cospi_p16_p16 =
-      _mm_setr_epi32((int)cospi_16_64, 0, (int)cospi_16_64, 0);
-  const __m128i cospi_p08_p08 =
-      _mm_setr_epi32((int)cospi_8_64, 0, (int)cospi_8_64, 0);
-  const __m128i cospi_p24_p24 =
-      _mm_setr_epi32((int)cospi_24_64, 0, (int)cospi_24_64, 0);
+  const __m128i cospi_p16_p16 = _mm_setr_epi32(cospi_16_64, 0, cospi_16_64, 0);
+  const __m128i cospi_p08_p08 = _mm_setr_epi32(cospi_8_64, 0, cospi_8_64, 0);
+  const __m128i cospi_p24_p24 = _mm_setr_epi32(cospi_24_64, 0, cospi_24_64, 0);
   __m128i temp1[4], temp2[4], step[4];
 
   transpose_32bit_4x4(io, io);
@@ -81,8 +78,8 @@
 
   // stage 1
   highbd_butterfly_cospi16_sse2(io[0], io[2], &step[0], &step[1]);
-  highbd_butterfly_sse2(io[1], io[3], (int)cospi_24_64, (int)cospi_8_64,
-                        &step[2], &step[3]);
+  highbd_butterfly_sse2(io[1], io[3], cospi_24_64, cospi_8_64, &step[2],
+                        &step[3]);
 
   // stage 2
   io[0] = _mm_add_epi32(step[0], step[3]);  // step[0] + step[3]
@@ -147,8 +144,10 @@
   tran_low_t out;
   __m128i dc, d;
 
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+  out = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
+  out =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, 4);
   dc = _mm_set1_epi16(a1);
 
--- a/vpx_dsp/x86/highbd_idct4x4_add_sse4.c
+++ b/vpx_dsp/x86/highbd_idct4x4_add_sse4.c
@@ -24,12 +24,12 @@
   // stage 1
   temp[0] = _mm_add_epi32(io[0], io[2]);  // input[0] + input[2]
   extend_64bit(temp[0], temp);
-  step[0] = multiplication_round_shift_sse4_1(temp, (int)cospi_16_64);
+  step[0] = multiplication_round_shift_sse4_1(temp, cospi_16_64);
   temp[0] = _mm_sub_epi32(io[0], io[2]);  // input[0] - input[2]
   extend_64bit(temp[0], temp);
-  step[1] = multiplication_round_shift_sse4_1(temp, (int)cospi_16_64);
-  highbd_butterfly_sse4_1(io[1], io[3], (int)cospi_24_64, (int)cospi_8_64,
-                          &step[2], &step[3]);
+  step[1] = multiplication_round_shift_sse4_1(temp, cospi_16_64);
+  highbd_butterfly_sse4_1(io[1], io[3], cospi_24_64, cospi_8_64, &step[2],
+                          &step[3]);
 
   // stage 2
   io[0] = _mm_add_epi32(step[0], step[3]);  // step[0] + step[3]
--- a/vpx_dsp/x86/highbd_idct8x8_add_sse2.c
+++ b/vpx_dsp/x86/highbd_idct8x8_add_sse2.c
@@ -25,15 +25,15 @@
   step1[2] = io[4];
   step1[1] = io[2];
   step1[3] = io[6];
-  highbd_butterfly_sse2(io[1], io[7], (int)cospi_28_64, (int)cospi_4_64,
-                        &step1[4], &step1[7]);
-  highbd_butterfly_sse2(io[5], io[3], (int)cospi_12_64, (int)cospi_20_64,
-                        &step1[5], &step1[6]);
+  highbd_butterfly_sse2(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],
+                        &step1[7]);
+  highbd_butterfly_sse2(io[5], io[3], cospi_12_64, cospi_20_64, &step1[5],
+                        &step1[6]);
 
   // stage 2
   highbd_butterfly_cospi16_sse2(step1[0], step1[2], &step2[0], &step2[1]);
-  highbd_butterfly_sse2(step1[1], step1[3], (int)cospi_24_64, (int)cospi_8_64,
-                        &step2[2], &step2[3]);
+  highbd_butterfly_sse2(step1[1], step1[3], cospi_24_64, cospi_8_64, &step2[2],
+                        &step2[3]);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
@@ -61,18 +61,18 @@
   step1[0] = io[0];
   step1[1] = io[2];
   abs_extend_64bit_sse2(io[1], temp1, sign);
-  step1[4] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_28_64);
-  step1[7] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_4_64);
+  step1[4] = multiplication_round_shift_sse2(temp1, sign, cospi_28_64);
+  step1[7] = multiplication_round_shift_sse2(temp1, sign, cospi_4_64);
   abs_extend_64bit_sse2(io[3], temp1, sign);
-  step1[5] = multiplication_neg_round_shift_sse2(temp1, sign, (int)cospi_20_64);
-  step1[6] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_12_64);
+  step1[5] = multiplication_neg_round_shift_sse2(temp1, sign, cospi_20_64);
+  step1[6] = multiplication_round_shift_sse2(temp1, sign, cospi_12_64);
 
   // stage 2
   abs_extend_64bit_sse2(step1[0], temp1, sign);
-  step2[0] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_16_64);
+  step2[0] = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
   abs_extend_64bit_sse2(step1[1], temp1, sign);
-  step2[2] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_24_64);
-  step2[3] = multiplication_round_shift_sse2(temp1, sign, (int)cospi_8_64);
+  step2[2] = multiplication_round_shift_sse2(temp1, sign, cospi_24_64);
+  step2[3] = multiplication_round_shift_sse2(temp1, sign, cospi_8_64);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
--- a/vpx_dsp/x86/highbd_idct8x8_add_sse4.c
+++ b/vpx_dsp/x86/highbd_idct8x8_add_sse4.c
@@ -27,14 +27,14 @@
   step1[2] = io[4];
   step1[1] = io[2];
   step1[3] = io[6];
-  highbd_butterfly_sse4_1(io[1], io[7], (int)cospi_28_64, (int)cospi_4_64,
-                          &step1[4], &step1[7]);
-  highbd_butterfly_sse4_1(io[5], io[3], (int)cospi_12_64, (int)cospi_20_64,
-                          &step1[5], &step1[6]);
+  highbd_butterfly_sse4_1(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],
+                          &step1[7]);
+  highbd_butterfly_sse4_1(io[5], io[3], cospi_12_64, cospi_20_64, &step1[5],
+                          &step1[6]);
 
   // stage 2
   highbd_butterfly_cospi16_sse4_1(step1[0], step1[2], &step2[0], &step2[1]);
-  highbd_butterfly_sse4_1(step1[1], step1[3], (int)cospi_24_64, (int)cospi_8_64,
+  highbd_butterfly_sse4_1(step1[1], step1[3], cospi_24_64, cospi_8_64,
                           &step2[2], &step2[3]);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
@@ -63,18 +63,18 @@
   step1[0] = io[0];
   step1[1] = io[2];
   extend_64bit(io[1], temp1);
-  step1[4] = multiplication_round_shift_sse4_1(temp1, (int)cospi_28_64);
-  step1[7] = multiplication_round_shift_sse4_1(temp1, (int)cospi_4_64);
+  step1[4] = multiplication_round_shift_sse4_1(temp1, cospi_28_64);
+  step1[7] = multiplication_round_shift_sse4_1(temp1, cospi_4_64);
   extend_64bit(io[3], temp1);
-  step1[5] = multiplication_round_shift_sse4_1(temp1, -(int)cospi_20_64);
-  step1[6] = multiplication_round_shift_sse4_1(temp1, (int)cospi_12_64);
+  step1[5] = multiplication_round_shift_sse4_1(temp1, -cospi_20_64);
+  step1[6] = multiplication_round_shift_sse4_1(temp1, cospi_12_64);
 
   // stage 2
   extend_64bit(step1[0], temp1);
-  step2[0] = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
+  step2[0] = multiplication_round_shift_sse4_1(temp1, cospi_16_64);
   extend_64bit(step1[1], temp1);
-  step2[2] = multiplication_round_shift_sse4_1(temp1, (int)cospi_24_64);
-  step2[3] = multiplication_round_shift_sse4_1(temp1, (int)cospi_8_64);
+  step2[2] = multiplication_round_shift_sse4_1(temp1, cospi_24_64);
+  step2[3] = multiplication_round_shift_sse4_1(temp1, cospi_8_64);
   step2[4] = _mm_add_epi32(step1[4], step1[5]);
   step2[5] = _mm_sub_epi32(step1[4], step1[5]);
   step2[6] = _mm_sub_epi32(step1[7], step1[6]);
--- a/vpx_dsp/x86/highbd_inv_txfm_sse2.h
+++ b/vpx_dsp/x86/highbd_inv_txfm_sse2.h
@@ -181,10 +181,10 @@
 
   temp2 = _mm_add_epi32(in0, in1);
   abs_extend_64bit_sse2(temp2, temp1, sign);
-  *out0 = multiplication_round_shift_sse2(temp1, sign, (int)cospi_16_64);
+  *out0 = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
   temp2 = _mm_sub_epi32(in0, in1);
   abs_extend_64bit_sse2(temp2, temp1, sign);
-  *out1 = multiplication_round_shift_sse2(temp1, sign, (int)cospi_16_64);
+  *out1 = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
 }
 
 // Only do addition and subtraction butterfly, size = 16, 32
@@ -265,8 +265,10 @@
   tran_low_t out;
   __m128i dc, d;
 
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
-  out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
+  out = HIGHBD_WRAPLOW(
+      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
+  out =
+      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
   a1 = ROUND_POWER_OF_TWO(out, (size == 8) ? 5 : 6);
   dc = _mm_set1_epi16(a1);
 
--- a/vpx_dsp/x86/highbd_inv_txfm_sse4.h
+++ b/vpx_dsp/x86/highbd_inv_txfm_sse4.h
@@ -67,10 +67,10 @@
 
   temp2 = _mm_add_epi32(in0, in1);
   extend_64bit(temp2, temp1);
-  *out0 = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
+  *out0 = multiplication_round_shift_sse4_1(temp1, cospi_16_64);
   temp2 = _mm_sub_epi32(in0, in1);
   extend_64bit(temp2, temp1);
-  *out1 = multiplication_round_shift_sse4_1(temp1, (int)cospi_16_64);
+  *out1 = multiplication_round_shift_sse4_1(temp1, cospi_16_64);
 }
 
 static INLINE void highbd_partial_butterfly_sse4_1(const __m128i in,
--- a/vpx_dsp/x86/inv_txfm_sse2.c
+++ b/vpx_dsp/x86/inv_txfm_sse2.c
@@ -246,7 +246,7 @@
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
   const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__const_0 = _mm_set1_epi16(0);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
 
@@ -587,8 +587,8 @@
   const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64);
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
   const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64);
-  const __m128i k__cospi_m16_m16 = _mm_set1_epi16((int16_t)-cospi_16_64);
-  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i k__cospi_m16_m16 = _mm_set1_epi16(-cospi_16_64);
+  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
   const __m128i k__DCT_CONST_ROUNDING = _mm_set1_epi32(DCT_CONST_ROUNDING);
@@ -1006,8 +1006,7 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  butterfly(in[4], zero, (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-            &step1[7]);
+  butterfly(in[4], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
 
   // stage 4
   step2[0] = butterfly_cospi16(in[0]);
@@ -1022,8 +1021,7 @@
   step1[2] = step2[0];
   step1[3] = step2[0];
   step1[4] = step2[4];
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -1046,10 +1044,8 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  butterfly(in[2], zero, (int)cospi_30_64, (int)cospi_2_64, &step2[8],
-            &step2[15]);
-  butterfly(zero, in[6], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
-            &step2[12]);
+  butterfly(in[2], zero, cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
+  butterfly(zero, in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);
 
   // stage 3
   step1[8] = step2[8];
@@ -1082,24 +1078,20 @@
   __m128i step1[32];
 
   // stage 1
-  butterfly(in[1], zero, (int)cospi_31_64, (int)cospi_1_64, &step1[16],
-            &step1[31]);
-  butterfly(zero, in[7], (int)cospi_7_64, (int)cospi_25_64, &step1[19],
-            &step1[28]);
-  butterfly(in[5], zero, (int)cospi_27_64, (int)cospi_5_64, &step1[20],
-            &step1[27]);
-  butterfly(zero, in[3], (int)cospi_3_64, (int)cospi_29_64, &step1[23],
-            &step1[24]);
+  butterfly(in[1], zero, cospi_31_64, cospi_1_64, &step1[16], &step1[31]);
+  butterfly(zero, in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);
+  butterfly(in[5], zero, cospi_27_64, cospi_5_64, &step1[20], &step1[27]);
+  butterfly(zero, in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);
 
   // stage 3
-  butterfly(step1[31], step1[16], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
+  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
             &step1[30]);
-  butterfly(step1[28], step1[19], -(int)cospi_4_64, (int)cospi_28_64,
-            &step1[18], &step1[29]);
-  butterfly(step1[27], step1[20], (int)cospi_12_64, (int)cospi_20_64,
-            &step1[21], &step1[26]);
-  butterfly(step1[24], step1[23], -(int)cospi_20_64, (int)cospi_12_64,
-            &step1[22], &step1[25]);
+  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
+            &step1[29]);
+  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
+            &step1[26]);
+  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
+            &step1[25]);
 
   idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
 }
@@ -1145,16 +1137,12 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  butterfly(in[4], in[28], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-            &step1[7]);
-  butterfly(in[20], in[12], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
-            &step1[6]);
+  butterfly(in[4], in[28], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
+  butterfly(in[20], in[12], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);
 
   // stage 4
-  butterfly(in[0], in[16], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
-            &step2[0]);
-  butterfly(in[8], in[24], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
-            &step2[3]);
+  butterfly(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
+  butterfly(in[8], in[24], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
   step2[4] = _mm_add_epi16(step1[4], step1[5]);
   step2[5] = _mm_sub_epi16(step1[4], step1[5]);
   step2[6] = _mm_sub_epi16(step1[7], step1[6]);
@@ -1166,8 +1154,7 @@
   step1[2] = _mm_sub_epi16(step2[1], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
   step1[4] = step2[4];
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -1189,14 +1176,10 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  butterfly(in[2], in[30], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
-            &step2[15]);
-  butterfly(in[18], in[14], (int)cospi_14_64, (int)cospi_18_64, &step2[9],
-            &step2[14]);
-  butterfly(in[10], in[22], (int)cospi_22_64, (int)cospi_10_64, &step2[10],
-            &step2[13]);
-  butterfly(in[26], in[6], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
-            &step2[12]);
+  butterfly(in[2], in[30], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
+  butterfly(in[18], in[14], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);
+  butterfly(in[10], in[22], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);
+  butterfly(in[26], in[6], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);
 
   // stage 3
   step1[8] = _mm_add_epi16(step2[8], step2[9]);
@@ -1229,24 +1212,16 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  butterfly(in[1], in[31], (int)cospi_31_64, (int)cospi_1_64, &step1[16],
-            &step1[31]);
-  butterfly(in[17], in[15], (int)cospi_15_64, (int)cospi_17_64, &step1[17],
-            &step1[30]);
-  butterfly(in[9], in[23], (int)cospi_23_64, (int)cospi_9_64, &step1[18],
-            &step1[29]);
-  butterfly(in[25], in[7], (int)cospi_7_64, (int)cospi_25_64, &step1[19],
-            &step1[28]);
+  butterfly(in[1], in[31], cospi_31_64, cospi_1_64, &step1[16], &step1[31]);
+  butterfly(in[17], in[15], cospi_15_64, cospi_17_64, &step1[17], &step1[30]);
+  butterfly(in[9], in[23], cospi_23_64, cospi_9_64, &step1[18], &step1[29]);
+  butterfly(in[25], in[7], cospi_7_64, cospi_25_64, &step1[19], &step1[28]);
 
-  butterfly(in[5], in[27], (int)cospi_27_64, (int)cospi_5_64, &step1[20],
-            &step1[27]);
-  butterfly(in[21], in[11], (int)cospi_11_64, (int)cospi_21_64, &step1[21],
-            &step1[26]);
+  butterfly(in[5], in[27], cospi_27_64, cospi_5_64, &step1[20], &step1[27]);
+  butterfly(in[21], in[11], cospi_11_64, cospi_21_64, &step1[21], &step1[26]);
 
-  butterfly(in[13], in[19], (int)cospi_19_64, (int)cospi_13_64, &step1[22],
-            &step1[25]);
-  butterfly(in[29], in[3], (int)cospi_3_64, (int)cospi_29_64, &step1[23],
-            &step1[24]);
+  butterfly(in[13], in[19], cospi_19_64, cospi_13_64, &step1[22], &step1[25]);
+  butterfly(in[29], in[3], cospi_3_64, cospi_29_64, &step1[23], &step1[24]);
 
   // stage 2
   step2[16] = _mm_add_epi16(step1[16], step1[17]);
@@ -1270,16 +1245,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  butterfly(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
+  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
             &step1[30]);
-  butterfly(step2[29], step2[18], -(int)cospi_4_64, (int)cospi_28_64,
-            &step1[18], &step1[29]);
+  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
+            &step1[29]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  butterfly(step2[26], step2[21], (int)cospi_12_64, (int)cospi_20_64,
-            &step1[21], &step1[26]);
-  butterfly(step2[25], step2[22], -(int)cospi_20_64, (int)cospi_12_64,
-            &step1[22], &step1[25]);
+  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
+            &step1[26]);
+  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
+            &step1[25]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
--- a/vpx_dsp/x86/inv_txfm_sse2.h
+++ b/vpx_dsp/x86/inv_txfm_sse2.h
@@ -90,7 +90,7 @@
 }
 
 static INLINE __m128i butterfly_cospi16(const __m128i in) {
-  const __m128i cst = pair_set_epi16((int)cospi_16_64, (int)cospi_16_64);
+  const __m128i cst = pair_set_epi16(cospi_16_64, cospi_16_64);
   const __m128i lo = _mm_unpacklo_epi16(in, _mm_setzero_si128());
   const __m128i hi = _mm_unpackhi_epi16(in, _mm_setzero_si128());
   return idct_calc_wraplow_sse2(lo, hi, cst);
@@ -252,16 +252,12 @@
   __m128i step1[8], step2[8];
 
   // stage 1
-  butterfly(in[1], in[7], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-            &step1[7]);
-  butterfly(in[5], in[3], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
-            &step1[6]);
+  butterfly(in[1], in[7], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
+  butterfly(in[5], in[3], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);
 
   // stage 2
-  butterfly(in[0], in[4], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
-            &step2[0]);
-  butterfly(in[2], in[6], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
-            &step2[3]);
+  butterfly(in[0], in[4], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
+  butterfly(in[2], in[6], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
 
   step2[4] = _mm_add_epi16(step1[4], step1[5]);
   step2[5] = _mm_sub_epi16(step1[4], step1[5]);
@@ -273,8 +269,7 @@
   step1[1] = _mm_add_epi16(step2[1], step2[2]);
   step1[2] = _mm_sub_epi16(step2[1], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
 
   // stage 4
   out[0] = _mm_add_epi16(step1[0], step2[7]);
@@ -350,20 +345,14 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  butterfly(in[1], in[15], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
-            &step2[15]);
-  butterfly(in[9], in[7], (int)cospi_14_64, (int)cospi_18_64, &step2[9],
-            &step2[14]);
-  butterfly(in[5], in[11], (int)cospi_22_64, (int)cospi_10_64, &step2[10],
-            &step2[13]);
-  butterfly(in[13], in[3], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
-            &step2[12]);
+  butterfly(in[1], in[15], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
+  butterfly(in[9], in[7], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);
+  butterfly(in[5], in[11], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);
+  butterfly(in[13], in[3], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);
 
   // stage 3
-  butterfly(in[2], in[14], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-            &step1[7]);
-  butterfly(in[10], in[6], (int)cospi_12_64, (int)cospi_20_64, &step1[5],
-            &step1[6]);
+  butterfly(in[2], in[14], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
+  butterfly(in[10], in[6], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);
   step1[8] = _mm_add_epi16(step2[8], step2[9]);
   step1[9] = _mm_sub_epi16(step2[8], step2[9]);
   step1[10] = _mm_sub_epi16(step2[11], step2[10]);
@@ -374,14 +363,12 @@
   step1[15] = _mm_add_epi16(step2[14], step2[15]);
 
   // stage 4
-  butterfly(in[0], in[8], (int)cospi_16_64, (int)cospi_16_64, &step2[1],
-            &step2[0]);
-  butterfly(in[4], in[12], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
-            &step2[3]);
-  butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
+  butterfly(in[0], in[8], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
+  butterfly(in[4], in[12], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
+  butterfly(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
             &step2[14]);
-  butterfly(step1[10], step1[13], -(int)cospi_8_64, -(int)cospi_24_64,
-            &step2[13], &step2[10]);
+  butterfly(step1[10], step1[13], -cospi_8_64, -cospi_24_64, &step2[13],
+            &step2[10]);
   step2[5] = _mm_sub_epi16(step1[4], step1[5]);
   step1[4] = _mm_add_epi16(step1[4], step1[5]);
   step2[6] = _mm_sub_epi16(step1[7], step1[6]);
@@ -396,8 +383,7 @@
   step1[1] = _mm_add_epi16(step2[1], step2[2]);
   step1[2] = _mm_sub_epi16(step2[1], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
   step1[8] = _mm_add_epi16(step2[8], step2[11]);
   step1[9] = _mm_add_epi16(step2[9], step2[10]);
   step1[10] = _mm_sub_epi16(step2[9], step2[10]);
@@ -416,10 +402,10 @@
   step2[5] = _mm_sub_epi16(step1[2], step1[5]);
   step2[6] = _mm_sub_epi16(step1[1], step1[6]);
   step2[7] = _mm_sub_epi16(step1[0], step1[7]);
-  butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64,
-            &step2[10], &step2[13]);
-  butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64,
-            &step2[11], &step2[12]);
+  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &step2[10],
+            &step2[13]);
+  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &step2[11],
+            &step2[12]);
 
   // stage 7
   out[0] = _mm_add_epi16(step2[0], step1[15]);
@@ -553,25 +539,21 @@
   transpose_16bit_4x8(l, io);
 
   // stage 2
-  butterfly(io[1], zero, (int)cospi_30_64, (int)cospi_2_64, &step2[8],
-            &step2[15]);
-  butterfly(zero, io[3], (int)cospi_6_64, (int)cospi_26_64, &step2[11],
-            &step2[12]);
+  butterfly(io[1], zero, cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
+  butterfly(zero, io[3], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);
 
   // stage 3
-  butterfly(io[2], zero, (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-            &step1[7]);
+  butterfly(io[2], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
 
   // stage 4
   step1[0] = butterfly_cospi16(io[0]);
-  butterfly(step2[15], step2[8], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
+  butterfly(step2[15], step2[8], cospi_24_64, cospi_8_64, &step2[9],
             &step2[14]);
-  butterfly(step2[11], step2[12], -(int)cospi_8_64, -(int)cospi_24_64,
-            &step2[13], &step2[10]);
+  butterfly(step2[11], step2[12], -cospi_8_64, -cospi_24_64, &step2[13],
+            &step2[10]);
 
   // stage 5
-  butterfly(step1[7], step1[4], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step1[7], step1[4], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
   step1[8] = _mm_add_epi16(step2[8], step2[11]);
   step1[9] = _mm_add_epi16(step2[9], step2[10]);
   step1[10] = _mm_sub_epi16(step2[9], step2[10]);
@@ -590,10 +572,10 @@
   step2[5] = _mm_sub_epi16(step1[0], step1[5]);
   step2[6] = _mm_sub_epi16(step1[0], step1[6]);
   step2[7] = _mm_sub_epi16(step1[0], step1[7]);
-  butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64,
-            &step2[10], &step2[13]);
-  butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64,
-            &step2[11], &step2[12]);
+  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &step2[10],
+            &step2[13]);
+  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &step2[11],
+            &step2[12]);
 
   // stage 7
   io[0] = _mm_add_epi16(step2[0], step1[15]);
@@ -621,10 +603,10 @@
   // stage 4
   step2[8] = step1[8];
   step2[15] = step1[15];
-  butterfly(step1[14], step1[9], (int)cospi_24_64, (int)cospi_8_64, &step2[9],
+  butterfly(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
             &step2[14]);
-  butterfly(step1[13], step1[10], -(int)cospi_8_64, (int)cospi_24_64,
-            &step2[10], &step2[13]);
+  butterfly(step1[13], step1[10], -cospi_8_64, cospi_24_64, &step2[10],
+            &step2[13]);
   step2[11] = step1[11];
   step2[12] = step1[12];
 
@@ -641,10 +623,8 @@
   // stage 6
   out[8] = step1[8];
   out[9] = step1[9];
-  butterfly(step1[13], step1[10], (int)cospi_16_64, (int)cospi_16_64, &out[10],
-            &out[13]);
-  butterfly(step1[12], step1[11], (int)cospi_16_64, (int)cospi_16_64, &out[11],
-            &out[12]);
+  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &out[10], &out[13]);
+  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &out[11], &out[12]);
   out[14] = step1[14];
   out[15] = step1[15];
 }
@@ -675,14 +655,14 @@
   // stage 5
   step1[16] = step2[16];
   step1[17] = step2[17];
-  butterfly(step2[29], step2[18], (int)cospi_24_64, (int)cospi_8_64, &step1[18],
+  butterfly(step2[29], step2[18], cospi_24_64, cospi_8_64, &step1[18],
             &step1[29]);
-  butterfly(step2[28], step2[19], (int)cospi_24_64, (int)cospi_8_64, &step1[19],
+  butterfly(step2[28], step2[19], cospi_24_64, cospi_8_64, &step1[19],
             &step1[28]);
-  butterfly(step2[27], step2[20], -(int)cospi_8_64, (int)cospi_24_64,
-            &step1[20], &step1[27]);
-  butterfly(step2[26], step2[21], -(int)cospi_8_64, (int)cospi_24_64,
-            &step1[21], &step1[26]);
+  butterfly(step2[27], step2[20], -cospi_8_64, cospi_24_64, &step1[20],
+            &step1[27]);
+  butterfly(step2[26], step2[21], -cospi_8_64, cospi_24_64, &step1[21],
+            &step1[26]);
   step1[22] = step2[22];
   step1[23] = step2[23];
   step1[24] = step2[24];
@@ -710,14 +690,10 @@
   out[31] = _mm_add_epi16(step1[24], step1[31]);
 
   // stage 7
-  butterfly(step2[27], step2[20], (int)cospi_16_64, (int)cospi_16_64, &out[20],
-            &out[27]);
-  butterfly(step2[26], step2[21], (int)cospi_16_64, (int)cospi_16_64, &out[21],
-            &out[26]);
-  butterfly(step2[25], step2[22], (int)cospi_16_64, (int)cospi_16_64, &out[22],
-            &out[25]);
-  butterfly(step2[24], step2[23], (int)cospi_16_64, (int)cospi_16_64, &out[23],
-            &out[24]);
+  butterfly(step2[27], step2[20], cospi_16_64, cospi_16_64, &out[20], &out[27]);
+  butterfly(step2[26], step2[21], cospi_16_64, cospi_16_64, &out[21], &out[26]);
+  butterfly(step2[25], step2[22], cospi_16_64, cospi_16_64, &out[22], &out[25]);
+  butterfly(step2[24], step2[23], cospi_16_64, cospi_16_64, &out[23], &out[24]);
 }
 
 void idct4_sse2(__m128i *const in);
--- a/vpx_dsp/x86/inv_txfm_ssse3.c
+++ b/vpx_dsp/x86/inv_txfm_ssse3.c
@@ -26,7 +26,7 @@
 }
 
 static INLINE __m128i partial_butterfly_cospi16_ssse3(const __m128i in) {
-  const __m128i coef_pair = _mm_set1_epi16(2 * (int)cospi_16_64);
+  const __m128i coef_pair = _mm_set1_epi16(2 * cospi_16_64);
   return _mm_mulhrs_epi16(in, coef_pair);
 }
 
@@ -57,8 +57,7 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  partial_butterfly_ssse3(in[4], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-                          &step1[7]);
+  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
 
   // stage 4
   step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
@@ -73,8 +72,7 @@
   step1[2] = step2[0];
   step1[3] = step2[0];
   step1[4] = step2[4];
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -96,9 +94,9 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  partial_butterfly_ssse3(in[2], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
+  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                           &step2[15]);
-  partial_butterfly_ssse3(in[6], -(int)cospi_26_64, (int)cospi_6_64, &step2[11],
+  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                           &step2[12]);
 
   // stage 3
@@ -131,24 +129,24 @@
   __m128i step1[32];
 
   // stage 1
-  partial_butterfly_ssse3(in[1], (int)cospi_31_64, (int)cospi_1_64, &step1[16],
+  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                           &step1[31]);
-  partial_butterfly_ssse3(in[7], -(int)cospi_25_64, (int)cospi_7_64, &step1[19],
+  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                           &step1[28]);
-  partial_butterfly_ssse3(in[5], (int)cospi_27_64, (int)cospi_5_64, &step1[20],
+  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                           &step1[27]);
-  partial_butterfly_ssse3(in[3], -(int)cospi_29_64, (int)cospi_3_64, &step1[23],
+  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                           &step1[24]);
 
   // stage 3
-  butterfly(step1[31], step1[16], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
+  butterfly(step1[31], step1[16], cospi_28_64, cospi_4_64, &step1[17],
             &step1[30]);
-  butterfly(step1[28], step1[19], -(int)cospi_4_64, (int)cospi_28_64,
-            &step1[18], &step1[29]);
-  butterfly(step1[27], step1[20], (int)cospi_12_64, (int)cospi_20_64,
-            &step1[21], &step1[26]);
-  butterfly(step1[24], step1[23], -(int)cospi_20_64, (int)cospi_12_64,
-            &step1[22], &step1[25]);
+  butterfly(step1[28], step1[19], -cospi_4_64, cospi_28_64, &step1[18],
+            &step1[29]);
+  butterfly(step1[27], step1[20], cospi_12_64, cospi_20_64, &step1[21],
+            &step1[26]);
+  butterfly(step1[24], step1[23], -cospi_20_64, cospi_12_64, &step1[22],
+            &step1[25]);
 
   idct32_8x32_quarter_3_4_stage_4_to_7(step1, out);
 }
@@ -194,15 +192,13 @@
   __m128i step1[8], step2[8];
 
   // stage 3
-  partial_butterfly_ssse3(in[4], (int)cospi_28_64, (int)cospi_4_64, &step1[4],
-                          &step1[7]);
-  partial_butterfly_ssse3(in[12], -(int)cospi_20_64, (int)cospi_12_64,
-                          &step1[5], &step1[6]);
+  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
+  partial_butterfly_ssse3(in[12], -cospi_20_64, cospi_12_64, &step1[5],
+                          &step1[6]);
 
   // stage 4
   step2[0] = partial_butterfly_cospi16_ssse3(in[0]);
-  partial_butterfly_ssse3(in[8], (int)cospi_24_64, (int)cospi_8_64, &step2[2],
-                          &step2[3]);
+  partial_butterfly_ssse3(in[8], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
   step2[4] = _mm_add_epi16(step1[4], step1[5]);
   step2[5] = _mm_sub_epi16(step1[4], step1[5]);
   step2[6] = _mm_sub_epi16(step1[7], step1[6]);
@@ -214,8 +210,7 @@
   step1[2] = _mm_sub_epi16(step2[0], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
   step1[4] = step2[4];
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
   step1[7] = step2[7];
 
   // stage 6
@@ -237,13 +232,13 @@
   __m128i step1[16], step2[16];
 
   // stage 2
-  partial_butterfly_ssse3(in[2], (int)cospi_30_64, (int)cospi_2_64, &step2[8],
+  partial_butterfly_ssse3(in[2], cospi_30_64, cospi_2_64, &step2[8],
                           &step2[15]);
-  partial_butterfly_ssse3(in[14], -(int)cospi_18_64, (int)cospi_14_64,
-                          &step2[9], &step2[14]);
-  partial_butterfly_ssse3(in[10], (int)cospi_22_64, (int)cospi_10_64,
-                          &step2[10], &step2[13]);
-  partial_butterfly_ssse3(in[6], -(int)cospi_26_64, (int)cospi_6_64, &step2[11],
+  partial_butterfly_ssse3(in[14], -cospi_18_64, cospi_14_64, &step2[9],
+                          &step2[14]);
+  partial_butterfly_ssse3(in[10], cospi_22_64, cospi_10_64, &step2[10],
+                          &step2[13]);
+  partial_butterfly_ssse3(in[6], -cospi_26_64, cospi_6_64, &step2[11],
                           &step2[12]);
 
   // stage 3
@@ -277,23 +272,23 @@
   __m128i step1[32], step2[32];
 
   // stage 1
-  partial_butterfly_ssse3(in[1], (int)cospi_31_64, (int)cospi_1_64, &step1[16],
+  partial_butterfly_ssse3(in[1], cospi_31_64, cospi_1_64, &step1[16],
                           &step1[31]);
-  partial_butterfly_ssse3(in[15], -(int)cospi_17_64, (int)cospi_15_64,
-                          &step1[17], &step1[30]);
-  partial_butterfly_ssse3(in[9], (int)cospi_23_64, (int)cospi_9_64, &step1[18],
+  partial_butterfly_ssse3(in[15], -cospi_17_64, cospi_15_64, &step1[17],
+                          &step1[30]);
+  partial_butterfly_ssse3(in[9], cospi_23_64, cospi_9_64, &step1[18],
                           &step1[29]);
-  partial_butterfly_ssse3(in[7], -(int)cospi_25_64, (int)cospi_7_64, &step1[19],
+  partial_butterfly_ssse3(in[7], -cospi_25_64, cospi_7_64, &step1[19],
                           &step1[28]);
 
-  partial_butterfly_ssse3(in[5], (int)cospi_27_64, (int)cospi_5_64, &step1[20],
+  partial_butterfly_ssse3(in[5], cospi_27_64, cospi_5_64, &step1[20],
                           &step1[27]);
-  partial_butterfly_ssse3(in[11], -(int)cospi_21_64, (int)cospi_11_64,
-                          &step1[21], &step1[26]);
+  partial_butterfly_ssse3(in[11], -cospi_21_64, cospi_11_64, &step1[21],
+                          &step1[26]);
 
-  partial_butterfly_ssse3(in[13], (int)cospi_19_64, (int)cospi_13_64,
-                          &step1[22], &step1[25]);
-  partial_butterfly_ssse3(in[3], -(int)cospi_29_64, (int)cospi_3_64, &step1[23],
+  partial_butterfly_ssse3(in[13], cospi_19_64, cospi_13_64, &step1[22],
+                          &step1[25]);
+  partial_butterfly_ssse3(in[3], -cospi_29_64, cospi_3_64, &step1[23],
                           &step1[24]);
 
   // stage 2
@@ -318,16 +313,16 @@
   // stage 3
   step1[16] = step2[16];
   step1[31] = step2[31];
-  butterfly(step2[30], step2[17], (int)cospi_28_64, (int)cospi_4_64, &step1[17],
+  butterfly(step2[30], step2[17], cospi_28_64, cospi_4_64, &step1[17],
             &step1[30]);
-  butterfly(step2[29], step2[18], -(int)cospi_4_64, (int)cospi_28_64,
-            &step1[18], &step1[29]);
+  butterfly(step2[29], step2[18], -cospi_4_64, cospi_28_64, &step1[18],
+            &step1[29]);
   step1[19] = step2[19];
   step1[20] = step2[20];
-  butterfly(step2[26], step2[21], (int)cospi_12_64, (int)cospi_20_64,
-            &step1[21], &step1[26]);
-  butterfly(step2[25], step2[22], -(int)cospi_20_64, (int)cospi_12_64,
-            &step1[22], &step1[25]);
+  butterfly(step2[26], step2[21], cospi_12_64, cospi_20_64, &step1[21],
+            &step1[26]);
+  butterfly(step2[25], step2[22], -cospi_20_64, cospi_12_64, &step1[22],
+            &step1[25]);
   step1[23] = step2[23];
   step1[24] = step2[24];
   step1[27] = step2[27];
--- a/vpx_dsp/x86/inv_txfm_ssse3.h
+++ b/vpx_dsp/x86/inv_txfm_ssse3.h
@@ -22,7 +22,7 @@
   const __m128i cp_28d_4d = dual_set_epi16(2 * cospi_28_64, 2 * cospi_4_64);
   const __m128i cp_n20d_12d = dual_set_epi16(-2 * cospi_20_64, 2 * cospi_12_64);
   const __m128i cp_8d_24d = dual_set_epi16(2 * cospi_8_64, 2 * cospi_24_64);
-  const __m128i cp_16_16 = _mm_set1_epi16((int16_t)cospi_16_64);
+  const __m128i cp_16_16 = _mm_set1_epi16(cospi_16_64);
   const __m128i cp_16_n16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i cospi_16_64d = _mm_set1_epi16((int16_t)(2 * cospi_16_64));
   const __m128i cospi_28_64d = _mm_set1_epi16((int16_t)(2 * cospi_28_64));
@@ -92,8 +92,7 @@
   step1[1] = _mm_add_epi16(step2[0], step2[2]);
   step1[2] = _mm_sub_epi16(step2[0], step2[2]);
   step1[3] = _mm_sub_epi16(step2[0], step2[3]);
-  butterfly(step2[6], step2[5], (int)cospi_16_64, (int)cospi_16_64, &step1[5],
-            &step1[6]);
+  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
 
   // stage 4
   io[0] = _mm_add_epi16(step1[0], step2[7]);