Commit 6ae7b3d9 authored by Jingning Han

Fix high bit-depth quantization process

Scale the rounding factor according to the scaling factor applied
to the quantization step size. This resolves a compression
performance regression for transform sizes 32x32 and above.

BUG=aomedia:599

Change-Id: Id3fc9a46c4a8843ff5d77ccaa59ee3112b12d7f4
parent 32d26bc0
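
The scaling argument in the commit message can be checked with a small standalone example. The round, step, and log_scale values below are hypothetical (they are not taken from AV1's quantization tables); the point is only that when the effective quantization step is divided by 2^log_scale for the larger transform sizes, an unshifted rounding offset inflates the relative rounding bias, while shifting it right by log_scale keeps the ratio unchanged.

/* Hypothetical numbers, for illustration only. */
#include <stdio.h>

int main(void) {
  const int round = 40;    /* rounding offset tuned for log_scale = 0 */
  const int step = 64;     /* nominal quantization step size */
  const int log_scale = 1; /* nonzero for the 32x32-and-larger sizes */

  const int scaled_step = step >> log_scale; /* effective step size */

  printf("baseline bias       : %.3f\n", (double)round / step);
  printf("unshifted round bias: %.3f\n", (double)round / scaled_step);
  printf("shifted round bias  : %.3f\n",
         (double)(round >> log_scale) / scaled_step);
  return 0;
}

The first and last ratios come out equal; the inflated middle one is the bias behind the regression this commit fixes.
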
@@ -1547,7 +1547,7 @@ void av1_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
 #endif
     const int coeff_sign = (coeff >> 31);
     const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
-    const int64_t tmp = abs_coeff + round_ptr[rc != 0];
+    const int64_t tmp = abs_coeff + (round_ptr[rc != 0] >> log_scale);
 #if CONFIG_AOM_QM
     const uint32_t abs_qcoeff =
         (uint32_t)((tmp * quant_ptr[rc != 0] * wt) >> (shift + AOM_QM_BITS));
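
For reference, a minimal scalar sketch of the quantize/dequantize step after this change, using the same names as the hunk above (round_ptr, quant_ptr, dequant_ptr, log_scale). The shift of 16 - log_scale, the non-QM multiply, and the dequantization by dequant / (1 << log_scale) are assumptions based on the surrounding fp quantizer, not part of this diff; the coefficient loop, eob tracking, and CONFIG_AOM_QM weighting are omitted.

#include <stdint.h>

typedef int32_t tran_low_t; /* stand-in for the codec's typedef */

/* Quantizes one coefficient; rc != 0 selects the AC entry of each table. */
void quantize_fp_coeff_sketch(tran_low_t coeff, int rc,
                              const int16_t *round_ptr,
                              const int16_t *quant_ptr,
                              const int16_t *dequant_ptr, int log_scale,
                              tran_low_t *qcoeff, tran_low_t *dqcoeff) {
  const int shift = 16 - log_scale; /* assumed to match the fp quantizer */
  const int coeff_sign = (coeff >> 31);
  const int abs_coeff = (coeff ^ coeff_sign) - coeff_sign;
  /* The rounding factor is shifted by log_scale so it stays proportional
   * to the scaled quantization step size (the fix in this commit). */
  const int64_t tmp = abs_coeff + (round_ptr[rc != 0] >> log_scale);
  const uint32_t abs_qcoeff = (uint32_t)((tmp * quant_ptr[rc != 0]) >> shift);
  *qcoeff = (tran_low_t)((abs_qcoeff ^ coeff_sign) - coeff_sign);
  /* Dequantization undoes the implicit 2^log_scale scaling (non-QM path). */
  *dqcoeff = *qcoeff * dequant_ptr[rc != 0] / (1 << log_scale);
}
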
@@ -133,7 +133,8 @@ void av1_highbd_quantize_fp_sse4_1(
   coeff[0] = _mm_loadu_si128((__m128i const *)src);
   qparam[0] =
-      _mm_set_epi32(round_ptr[1], round_ptr[1], round_ptr[1], round_ptr[0]);
+      _mm_set_epi32(round_ptr[1] >> log_scale, round_ptr[1] >> log_scale,
+                    round_ptr[1] >> log_scale, round_ptr[0] >> log_scale);
   qparam[1] = _mm_set_epi64x(quant_ptr[1], quant_ptr[0]);
   qparam[2] = _mm_set_epi64x(dequant_ptr[1], dequant_ptr[0]);
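
The SSE4.1 hunk is the same change in vector form: the DC and AC rounding factors are pre-shifted before being packed into qparam[0]. The sketch below covers only that packing step plus the add to the absolute coefficients, not the multiply, shift, or sign-restore stages of the real kernel; the helper names are made up for illustration, and the snippet itself only needs SSE2 intrinsics even though the real file is built for SSE4.1.

#include <emmintrin.h> /* SSE2 intrinsics used below */
#include <stdint.h>

/* Packs the pre-shifted rounding factors: lane 0 gets the DC value,
 * lanes 1-3 the AC value, mirroring the qparam[0] setup in the hunk. */
static __m128i load_scaled_round(const int16_t *round_ptr, int log_scale) {
  return _mm_set_epi32(round_ptr[1] >> log_scale, round_ptr[1] >> log_scale,
                       round_ptr[1] >> log_scale, round_ptr[0] >> log_scale);
}

/* Adds the scaled rounding factors to four absolute coefficient values. */
static __m128i add_scaled_round(__m128i abs_coeff, const int16_t *round_ptr,
                                int log_scale) {
  return _mm_add_epi32(abs_coeff, load_scaled_round(round_ptr, log_scale));
}
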