ref: 47b9a09120c1085932af71f55eeba3e891c57a5e
parent: fb60204d4c36a4041daaca2f1461b731fa2dfaa2
author: James Zern <jzern@google.com>
date: Tue Apr 4 16:37:17 EDT 2017
Resolve -Wshorten-64-to-32 in highbd variance. For 8-bit the subtrahend is small enough to fit into uint32_t, so an explicit cast is safe. This is the same fix that was applied in c0241664a ("Resolve -Wshorten-64-to-32 in variance"). For 10/12-bit, apply the approach from 63a37d16f ("Prevent negative variance"): compute the difference in int64_t and clamp negative results to zero. Change-Id: Iab35e3f3f269035e17c711bd6cc01272c3137e1d
--- a/vpx_dsp/variance.c
+++ b/vpx_dsp/variance.c
@@ -294,7 +294,7 @@
uint32_t *sse) { \
int sum; \
highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum); \
- return *sse - (((int64_t)sum * sum) / (W * H)); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H)); \
} \
\
uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
--- a/vpx_dsp/x86/highbd_variance_sse2.c
+++ b/vpx_dsp/x86/highbd_variance_sse2.c
@@ -135,7 +135,7 @@
highbd_8_variance_sse2( \
src, src_stride, ref, ref_stride, w, h, sse, &sum, \
vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \
- return *sse - (((int64_t)sum * sum) >> shift); \
+ return *sse - (uint32_t)(((int64_t)sum * sum) >> shift); \
} \
\
uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \
@@ -293,12 +293,13 @@
} \
} \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ return sse - (uint32_t)((cast se * se) >> (wlog2 + hlog2)); \
} \
\
uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \
+ int64_t var; \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
@@ -328,7 +329,8 @@
se = ROUND_POWER_OF_TWO(se, 2); \
sse = ROUND_POWER_OF_TWO(sse, 4); \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
} \
\
uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \
@@ -337,6 +339,7 @@
int start_row; \
uint32_t sse; \
int se = 0; \
+ int64_t var; \
uint64_t long_sse = 0; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
@@ -375,7 +378,8 @@
se = ROUND_POWER_OF_TWO(se, 4); \
sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
#define FNS(opt) \
@@ -444,7 +448,7 @@
} \
} \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ return sse - (uint32_t)((cast se * se) >> (wlog2 + hlog2)); \
} \
\
uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \
@@ -451,6 +455,7 @@
const uint8_t *src8, int src_stride, int x_offset, int y_offset, \
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
+ int64_t var; \
uint32_t sse; \
uint16_t *src = CONVERT_TO_SHORTPTR(src8); \
uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \
@@ -481,7 +486,8 @@
se = ROUND_POWER_OF_TWO(se, 2); \
sse = ROUND_POWER_OF_TWO(sse, 4); \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
} \
\
uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \
@@ -489,6 +495,7 @@
const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \
const uint8_t *sec8) { \
int start_row; \
+ int64_t var; \
uint32_t sse; \
int se = 0; \
uint64_t long_sse = 0; \
@@ -530,7 +537,8 @@
se = ROUND_POWER_OF_TWO(se, 4); \
sse = (uint32_t)ROUND_POWER_OF_TWO(long_sse, 8); \
*sse_ptr = sse; \
- return sse - ((cast se * se) >> (wlog2 + hlog2)); \
+ var = (int64_t)(sse) - ((cast se * se) >> (wlog2 + hlog2)); \
+ return (var >= 0) ? (uint32_t)var : 0; \
}
#define FNS(opt1) \