Commit 98c59c98 authored by Yaowu Xu's avatar Yaowu Xu
Browse files

Make type conversions explicit

This eliminates MSVC compiler warnings.

Change-Id: Id6ace2586ed7c6248366905b133448fe8ecbd53d
parent 569101be
......@@ -2899,7 +2899,7 @@ static uint32_t write_tiles(VP10_COMP *const cpi,
}
}
#endif // CONFIG_EXT_TILE
return total_size;
return (uint32_t)total_size;
}
static void write_render_size(const VP10_COMMON *cm,
......@@ -3448,7 +3448,7 @@ void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dst, size_t *size) {
// Size of compressed header
vpx_wb_write_literal(&wb, 0, 16);
uncompressed_header_size = vpx_wb_bytes_written(&wb);
uncompressed_header_size = (uint32_t)vpx_wb_bytes_written(&wb);
data += uncompressed_header_size;
vpx_clear_system_state();
......
......@@ -719,8 +719,8 @@ void masked_variance(const uint8_t *a, int a_stride,
m += m_stride;
}
sum64 = (sum64 >= 0) ? sum64 : -sum64;
*sum = ROUND_POWER_OF_TWO(sum64, 6);
*sse = ROUND_POWER_OF_TWO(sse64, 12);
*sum = (int)ROUND_POWER_OF_TWO(sum64, 6);
*sse = (uint32_t)ROUND_POWER_OF_TWO(sse64, 12);
}
#define MASK_VAR(W, H) \
......
......@@ -76,7 +76,7 @@ uint32_t vpx_highbd_8_variance4x4_sse4_1(const uint8_t *a,
variance4x4_64_sse4_1(a, a_stride, b, b_stride, &local_sse, &sum);
*sse = (uint32_t)local_sse;
return *sse - ((sum * sum) >> 4);
return *sse - (uint32_t)((sum * sum) >> 4);
}
uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
......@@ -91,7 +91,7 @@ uint32_t vpx_highbd_10_variance4x4_sse4_1(const uint8_t *a,
*sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 4);
sum = ROUND_POWER_OF_TWO(sum, 2);
return *sse - ((sum * sum) >> 4);
return *sse - (uint32_t)((sum * sum) >> 4);
}
uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
......@@ -106,7 +106,7 @@ uint32_t vpx_highbd_12_variance4x4_sse4_1(const uint8_t *a,
*sse = (uint32_t)ROUND_POWER_OF_TWO(local_sse, 8);
sum = ROUND_POWER_OF_TWO(sum, 4);
return *sse - ((sum * sum) >> 4);
return *sse - (uint32_t)((sum * sum) >> 4);
}
// Sub-pixel
......
......@@ -54,9 +54,9 @@ static INLINE int64_t hsum_epi32_si64(__m128i v_d) {
}
#endif // CONFIG_VP9_HIGHBITDEPTH
static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
unsigned int* sse,
const int w, const int h) {
static INLINE uint32_t calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
uint32_t* sse,
const int w, const int h) {
int64_t sum64;
uint64_t sse64;
......@@ -71,9 +71,9 @@ static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
sse64 = ROUND_POWER_OF_TWO(sse64, 12);
// Store the SSE
*sse = (unsigned int)sse64;
*sse = (uint32_t)sse64;
// Compute the variance
return *sse - ((sum64 * sum64) / (w * h));
return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
/*****************************************************************************
......@@ -497,9 +497,9 @@ static INLINE unsigned int highbd_masked_variancewxh_ssse3(
&sum64, &sse64);
// Store the SSE
*sse = (unsigned int)sse64;
*sse = (uint32_t)sse64;
// Compute and return variance
return *sse - ((sum64 * sum64) / (w * h));
return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
......@@ -523,9 +523,9 @@ static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
sse64 = ROUND_POWER_OF_TWO(sse64, 4);
// Store the SSE
*sse = (unsigned int)sse64;
*sse = (uint32_t)sse64;
// Compute and return variance
return *sse - ((sum64 * sum64) / (w * h));
return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
......@@ -548,9 +548,9 @@ static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
sse64 = ROUND_POWER_OF_TWO(sse64, 8);
// Store the SSE
*sse = (unsigned int)sse64;
*sse = (uint32_t)sse64;
// Compute and return variance
return *sse - ((sum64 * sum64) / (w * h));
return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
#define HIGHBD_MASKED_VARWXH(W, H) \
......@@ -1460,10 +1460,11 @@ static void highbd_sum_and_sse(const __m128i v_a_w, const __m128i v_b_w,
*v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
}
static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
__m128i v_sse_q,
unsigned int* sse,
const int w, const int h) {
static INLINE uint32_t highbd_10_calc_masked_variance(__m128i v_sum_d,
__m128i v_sse_q,
uint32_t* sse,
const int w,
const int h) {
int64_t sum64;
uint64_t sse64;
......@@ -1482,14 +1483,15 @@ static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
sse64 = ROUND_POWER_OF_TWO(sse64, 4);
// Store the SSE
*sse = (unsigned int)sse64;
*sse = (uint32_t)sse64;
// Compute the variance
return *sse - ((sum64 * sum64) / (w * h));
return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
__m128i v_sse_q,
unsigned int* sse,
const int w, const int h) {
static INLINE uint32_t highbd_12_calc_masked_variance(__m128i v_sum_d,
__m128i v_sse_q,
uint32_t* sse,
const int w,
const int h) {
int64_t sum64;
uint64_t sse64;
......@@ -1508,9 +1510,9 @@ static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
sse64 = ROUND_POWER_OF_TWO(sse64, 8);
// Store the SSE
*sse = (unsigned int)sse64;
*sse = (uint32_t)sse64;
// Compute the variance
return *sse - ((sum64 * sum64) / (w * h));
return *sse - (uint32_t)((sum64 * sum64) / (w * h));
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment