Commit b81f04a0 authored by James Zern, committed by Gerrit Code Review

Merge "move vp9_avg to vpx_dsp"

parents b7654afb d36659ce
@@ -15,9 +15,7 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#if CONFIG_VP9_ENCODER
#include "./vp9_rtcd.h"
#endif
#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
@@ -323,91 +321,91 @@ using std::tr1::make_tuple;
INSTANTIATE_TEST_CASE_P(
C, AverageTest,
::testing::Values(
make_tuple(16, 16, 1, 8, &vp9_avg_8x8_c),
make_tuple(16, 16, 1, 4, &vp9_avg_4x4_c)));
make_tuple(16, 16, 1, 8, &vpx_avg_8x8_c),
make_tuple(16, 16, 1, 4, &vpx_avg_4x4_c)));
INSTANTIATE_TEST_CASE_P(
C, SatdTest,
::testing::Values(
make_tuple(16, &vp9_satd_c),
make_tuple(64, &vp9_satd_c),
make_tuple(256, &vp9_satd_c),
make_tuple(1024, &vp9_satd_c)));
make_tuple(16, &vpx_satd_c),
make_tuple(64, &vpx_satd_c),
make_tuple(256, &vpx_satd_c),
make_tuple(1024, &vpx_satd_c)));
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(
SSE2, AverageTest,
::testing::Values(
make_tuple(16, 16, 0, 8, &vp9_avg_8x8_sse2),
make_tuple(16, 16, 5, 8, &vp9_avg_8x8_sse2),
make_tuple(32, 32, 15, 8, &vp9_avg_8x8_sse2),
make_tuple(16, 16, 0, 4, &vp9_avg_4x4_sse2),
make_tuple(16, 16, 5, 4, &vp9_avg_4x4_sse2),
make_tuple(32, 32, 15, 4, &vp9_avg_4x4_sse2)));
make_tuple(16, 16, 0, 8, &vpx_avg_8x8_sse2),
make_tuple(16, 16, 5, 8, &vpx_avg_8x8_sse2),
make_tuple(32, 32, 15, 8, &vpx_avg_8x8_sse2),
make_tuple(16, 16, 0, 4, &vpx_avg_4x4_sse2),
make_tuple(16, 16, 5, 4, &vpx_avg_4x4_sse2),
make_tuple(32, 32, 15, 4, &vpx_avg_4x4_sse2)));
INSTANTIATE_TEST_CASE_P(
SSE2, IntProRowTest, ::testing::Values(
make_tuple(16, &vp9_int_pro_row_sse2, &vp9_int_pro_row_c),
make_tuple(32, &vp9_int_pro_row_sse2, &vp9_int_pro_row_c),
make_tuple(64, &vp9_int_pro_row_sse2, &vp9_int_pro_row_c)));
make_tuple(16, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
make_tuple(32, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c),
make_tuple(64, &vpx_int_pro_row_sse2, &vpx_int_pro_row_c)));
INSTANTIATE_TEST_CASE_P(
SSE2, IntProColTest, ::testing::Values(
make_tuple(16, &vp9_int_pro_col_sse2, &vp9_int_pro_col_c),
make_tuple(32, &vp9_int_pro_col_sse2, &vp9_int_pro_col_c),
make_tuple(64, &vp9_int_pro_col_sse2, &vp9_int_pro_col_c)));
make_tuple(16, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
make_tuple(32, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c),
make_tuple(64, &vpx_int_pro_col_sse2, &vpx_int_pro_col_c)));
INSTANTIATE_TEST_CASE_P(
SSE2, SatdTest,
::testing::Values(
make_tuple(16, &vp9_satd_sse2),
make_tuple(64, &vp9_satd_sse2),
make_tuple(256, &vp9_satd_sse2),
make_tuple(1024, &vp9_satd_sse2)));
make_tuple(16, &vpx_satd_sse2),
make_tuple(64, &vpx_satd_sse2),
make_tuple(256, &vpx_satd_sse2),
make_tuple(1024, &vpx_satd_sse2)));
#endif
#if HAVE_NEON
INSTANTIATE_TEST_CASE_P(
NEON, AverageTest,
::testing::Values(
make_tuple(16, 16, 0, 8, &vp9_avg_8x8_neon),
make_tuple(16, 16, 5, 8, &vp9_avg_8x8_neon),
make_tuple(32, 32, 15, 8, &vp9_avg_8x8_neon),
make_tuple(16, 16, 0, 4, &vp9_avg_4x4_neon),
make_tuple(16, 16, 5, 4, &vp9_avg_4x4_neon),
make_tuple(32, 32, 15, 4, &vp9_avg_4x4_neon)));
make_tuple(16, 16, 0, 8, &vpx_avg_8x8_neon),
make_tuple(16, 16, 5, 8, &vpx_avg_8x8_neon),
make_tuple(32, 32, 15, 8, &vpx_avg_8x8_neon),
make_tuple(16, 16, 0, 4, &vpx_avg_4x4_neon),
make_tuple(16, 16, 5, 4, &vpx_avg_4x4_neon),
make_tuple(32, 32, 15, 4, &vpx_avg_4x4_neon)));
INSTANTIATE_TEST_CASE_P(
NEON, IntProRowTest, ::testing::Values(
make_tuple(16, &vp9_int_pro_row_neon, &vp9_int_pro_row_c),
make_tuple(32, &vp9_int_pro_row_neon, &vp9_int_pro_row_c),
make_tuple(64, &vp9_int_pro_row_neon, &vp9_int_pro_row_c)));
make_tuple(16, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
make_tuple(32, &vpx_int_pro_row_neon, &vpx_int_pro_row_c),
make_tuple(64, &vpx_int_pro_row_neon, &vpx_int_pro_row_c)));
INSTANTIATE_TEST_CASE_P(
NEON, IntProColTest, ::testing::Values(
make_tuple(16, &vp9_int_pro_col_neon, &vp9_int_pro_col_c),
make_tuple(32, &vp9_int_pro_col_neon, &vp9_int_pro_col_c),
make_tuple(64, &vp9_int_pro_col_neon, &vp9_int_pro_col_c)));
make_tuple(16, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
make_tuple(32, &vpx_int_pro_col_neon, &vpx_int_pro_col_c),
make_tuple(64, &vpx_int_pro_col_neon, &vpx_int_pro_col_c)));
INSTANTIATE_TEST_CASE_P(
NEON, SatdTest,
::testing::Values(
make_tuple(16, &vp9_satd_neon),
make_tuple(64, &vp9_satd_neon),
make_tuple(256, &vp9_satd_neon),
make_tuple(1024, &vp9_satd_neon)));
make_tuple(16, &vpx_satd_neon),
make_tuple(64, &vpx_satd_neon),
make_tuple(256, &vpx_satd_neon),
make_tuple(1024, &vpx_satd_neon)));
#endif
#if HAVE_MSA
INSTANTIATE_TEST_CASE_P(
MSA, AverageTest,
::testing::Values(
make_tuple(16, 16, 0, 8, &vp9_avg_8x8_msa),
make_tuple(16, 16, 5, 8, &vp9_avg_8x8_msa),
make_tuple(32, 32, 15, 8, &vp9_avg_8x8_msa),
make_tuple(16, 16, 0, 4, &vp9_avg_4x4_msa),
make_tuple(16, 16, 5, 4, &vp9_avg_4x4_msa),
make_tuple(32, 32, 15, 4, &vp9_avg_4x4_msa)));
make_tuple(16, 16, 0, 8, &vpx_avg_8x8_msa),
make_tuple(16, 16, 5, 8, &vpx_avg_8x8_msa),
make_tuple(32, 32, 15, 8, &vpx_avg_8x8_msa),
make_tuple(16, 16, 0, 4, &vpx_avg_4x4_msa),
make_tuple(16, 16, 5, 4, &vpx_avg_4x4_msa),
make_tuple(32, 32, 15, 4, &vpx_avg_4x4_msa)));
#endif
} // namespace
@@ -143,7 +143,6 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct32x32_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_avg_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_error_block_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_quantize_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_subtract_test.cc
@@ -170,6 +169,11 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_dct_test.cc
endif # VP10
## Multi-codec / unconditional whitebox tests.
ifeq ($(findstring yes,$(CONFIG_VP9_ENCODER)$(CONFIG_VP10_ENCODER)),yes)
LIBVPX_TEST_SRCS-yes += avg_test.cc
endif
LIBVPX_TEST_SRCS-$(CONFIG_ENCODERS) += sad_test.cc
TEST_INTRA_PRED_SPEED_SRCS-yes := test_intra_pred_speed.cc
@@ -351,42 +351,6 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
#
if (vpx_config("CONFIG_VP10_ENCODER") eq "yes") {
add_proto qw/unsigned int vp10_avg_8x8/, "const uint8_t *, int p";
specialize qw/vp10_avg_8x8 sse2 neon msa/;
add_proto qw/unsigned int vp10_avg_4x4/, "const uint8_t *, int p";
specialize qw/vp10_avg_4x4 sse2 msa/;
add_proto qw/void vp10_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
specialize qw/vp10_minmax_8x8 sse2/;
add_proto qw/void vp10_hadamard_8x8/, "int16_t const *src_diff, int src_stride, int16_t *coeff";
specialize qw/vp10_hadamard_8x8 sse2/, "$ssse3_x86_64_x86inc";
add_proto qw/void vp10_hadamard_16x16/, "int16_t const *src_diff, int src_stride, int16_t *coeff";
specialize qw/vp10_hadamard_16x16 sse2/;
add_proto qw/int16_t vp10_satd/, "const int16_t *coeff, int length";
specialize qw/vp10_satd sse2/;
add_proto qw/void vp10_int_pro_row/, "int16_t *hbuf, uint8_t const *ref, const int ref_stride, const int height";
specialize qw/vp10_int_pro_row sse2 neon/;
add_proto qw/int16_t vp10_int_pro_col/, "uint8_t const *ref, const int width";
specialize qw/vp10_int_pro_col sse2 neon/;
add_proto qw/int vp10_vector_var/, "int16_t const *ref, int16_t const *src, const int bwl";
specialize qw/vp10_vector_var neon sse2/;
if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
add_proto qw/unsigned int vp10_highbd_avg_8x8/, "const uint8_t *, int p";
specialize qw/vp10_highbd_avg_8x8/;
add_proto qw/unsigned int vp10_highbd_avg_4x4/, "const uint8_t *, int p";
specialize qw/vp10_highbd_avg_4x4/;
add_proto qw/void vp10_highbd_minmax_8x8/, "const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max";
specialize qw/vp10_highbd_minmax_8x8/;
}
# ENCODEMB INVOKE
#
/*
* Copyright (c) 2015 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <arm_neon.h>
#include <assert.h>
#include "./vp10_rtcd.h"
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) {
const uint32x4_t a = vpaddlq_u16(v_16x8);
const uint64x2_t b = vpaddlq_u32(a);
const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
vreinterpret_u32_u64(vget_high_u64(b)));
return vget_lane_u32(c, 0);
}
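
The helper above folds eight 16-bit lanes into one 32-bit sum through widening pairwise adds (u16 to u32 to u64) plus one final 32-bit add. For reference, a scalar sketch of the same reduction (the function name here is ours, not part of the patch):

#include <stdint.h>

static unsigned int horizontal_add_u16x8_scalar(const uint16_t v[8]) {
  unsigned int sum = 0;
  int i;
  for (i = 0; i < 8; ++i)
    sum += v[i];  /* at most 8 * 65535, comfortably within 32 bits */
  return sum;
}
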
unsigned int vp10_avg_8x8_neon(const uint8_t *s, int p) {
uint8x8_t v_s0 = vld1_u8(s);
const uint8x8_t v_s1 = vld1_u8(s + p);
uint16x8_t v_sum = vaddl_u8(v_s0, v_s1);
v_s0 = vld1_u8(s + 2 * p);
v_sum = vaddw_u8(v_sum, v_s0);
v_s0 = vld1_u8(s + 3 * p);
v_sum = vaddw_u8(v_sum, v_s0);
v_s0 = vld1_u8(s + 4 * p);
v_sum = vaddw_u8(v_sum, v_s0);
v_s0 = vld1_u8(s + 5 * p);
v_sum = vaddw_u8(v_sum, v_s0);
v_s0 = vld1_u8(s + 6 * p);
v_sum = vaddw_u8(v_sum, v_s0);
v_s0 = vld1_u8(s + 7 * p);
v_sum = vaddw_u8(v_sum, v_s0);
return (horizontal_add_u16x8(v_sum) + 32) >> 6;
}
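
Note the widening pattern: the first two rows use vaddl_u8 (both operands widened to 16 bits), the remaining six use vaddw_u8 (8-bit lanes accumulated into the 16-bit sums). The final (sum + 32) >> 6 is a round-to-nearest division by 64; a quick arithmetic check (ours, not from the patch):

#include <assert.h>

static void check_avg_rounding(void) {
  assert(((96 + 32) >> 6) == 2);  /* mean 1.5 rounds up to 2 */
  assert(((95 + 32) >> 6) == 1);  /* mean just below 1.5 rounds down */
}
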
void vp10_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref,
const int ref_stride, const int height) {
int i;
uint16x8_t vec_sum_lo = vdupq_n_u16(0);
uint16x8_t vec_sum_hi = vdupq_n_u16(0);
const int shift_factor = ((height >> 5) + 3) * -1;
const int16x8_t vec_shift = vdupq_n_s16(shift_factor);
for (i = 0; i < height; i += 8) {
const uint8x16_t vec_row1 = vld1q_u8(ref);
const uint8x16_t vec_row2 = vld1q_u8(ref + ref_stride);
const uint8x16_t vec_row3 = vld1q_u8(ref + ref_stride * 2);
const uint8x16_t vec_row4 = vld1q_u8(ref + ref_stride * 3);
const uint8x16_t vec_row5 = vld1q_u8(ref + ref_stride * 4);
const uint8x16_t vec_row6 = vld1q_u8(ref + ref_stride * 5);
const uint8x16_t vec_row7 = vld1q_u8(ref + ref_stride * 6);
const uint8x16_t vec_row8 = vld1q_u8(ref + ref_stride * 7);
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row1));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row1));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row2));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row2));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row3));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row3));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row4));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row4));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row5));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row5));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row6));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row6));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row7));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row7));
vec_sum_lo = vaddw_u8(vec_sum_lo, vget_low_u8(vec_row8));
vec_sum_hi = vaddw_u8(vec_sum_hi, vget_high_u8(vec_row8));
ref += ref_stride * 8;
}
vec_sum_lo = vshlq_u16(vec_sum_lo, vec_shift);
vec_sum_hi = vshlq_u16(vec_sum_hi, vec_shift);
vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_lo));
hbuf += 8;
vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi));
}
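
The shift_factor is worth unpacking: for the supported heights {16, 32, 64}, ((height >> 5) + 3) * -1 equals -log2(height / 2), so the vshlq_u16 by a negative amount right-shifts each column sum, dividing it by height / 2. That matches the norm_factor = height >> 1 used by the C reference further down. A small check of the identity (hypothetical harness, not in the patch):

#include <assert.h>

static void check_shift_factor(void) {
  int height;
  for (height = 16; height <= 64; height *= 2) {
    const int shift_factor = ((height >> 5) + 3) * -1;
    assert((1 << -shift_factor) == (height >> 1));
  }
}
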
int16_t vp10_int_pro_col_neon(uint8_t const *ref, const int width) {
int i;
uint16x8_t vec_sum = vdupq_n_u16(0);
for (i = 0; i < width; i += 16) {
const uint8x16_t vec_row = vld1q_u8(ref);
vec_sum = vaddw_u8(vec_sum, vget_low_u8(vec_row));
vec_sum = vaddw_u8(vec_sum, vget_high_u8(vec_row));
ref += 16;
}
return horizontal_add_u16x8(vec_sum);
}
// ref, src = [0, 510] - max diff = 16-bits
// bwl = {2, 3, 4}, width = {16, 32, 64}
int vp10_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) {
int width = 4 << bwl;
int32x4_t sse = vdupq_n_s32(0);
int16x8_t total = vdupq_n_s16(0);
assert(width >= 8);
assert((width % 8) == 0);
do {
const int16x8_t r = vld1q_s16(ref);
const int16x8_t s = vld1q_s16(src);
const int16x8_t diff = vsubq_s16(r, s); // [-510, 510], 10 bits.
const int16x4_t diff_lo = vget_low_s16(diff);
const int16x4_t diff_hi = vget_high_s16(diff);
sse = vmlal_s16(sse, diff_lo, diff_lo); // dynamic range 26 bits.
sse = vmlal_s16(sse, diff_hi, diff_hi);
total = vaddq_s16(total, diff); // dynamic range 16 bits.
ref += 8;
src += 8;
width -= 8;
} while (width != 0);
{
// Note: 'total''s pairwise addition could be implemented similarly to
// horizontal_add_u16x8(), but one less vpaddl with 'total' when paired
// with the summation of 'sse' performed better on a Cortex-A15.
const int32x4_t t0 = vpaddlq_s16(total); // cascading summation of 'total'
const int32x2_t t1 = vadd_s32(vget_low_s32(t0), vget_high_s32(t0));
const int32x2_t t2 = vpadd_s32(t1, t1);
const int t = vget_lane_s32(t2, 0);
const int64x2_t s0 = vpaddlq_s32(sse); // cascading summation of 'sse'.
const int32x2_t s1 = vadd_s32(vreinterpret_s32_s64(vget_low_s64(s0)),
vreinterpret_s32_s64(vget_high_s64(s0)));
const int s = vget_lane_s32(s1, 0);
const int shift_factor = bwl + 2;
return s - ((t * t) >> shift_factor);
}
}
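
The tail block reduces total and sse to the scalars t and s, then applies the same normalization as the C reference below. With |diff| <= 510 and at most 64 elements, |t| <= 32640, so t * t peaks near 2^30 and stays inside a signed 32-bit int (our bound, consistent with the dynamic-range comments). The normalizer is a power of two by construction; a sketch of that check:

#include <assert.h>

static void check_var_normalizer(void) {
  int bwl;
  for (bwl = 2; bwl <= 4; ++bwl) {
    const int width = 4 << bwl;        /* 16, 32, 64 */
    assert(width == (1 << (bwl + 2))); /* >> (bwl + 2) divides by width */
  }
}
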
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vp10_rtcd.h"
#include "vp10/common/common.h"
#include "vpx_ports/mem.h"
unsigned int vp10_avg_8x8_c(const uint8_t *s, int p) {
int i, j;
int sum = 0;
for (i = 0; i < 8; ++i, s+=p)
for (j = 0; j < 8; sum += s[j], ++j) {}
return (sum + 32) >> 6;
}
unsigned int vp10_avg_4x4_c(const uint8_t *s, int p) {
int i, j;
int sum = 0;
for (i = 0; i < 4; ++i, s+=p)
for (j = 0; j < 4; sum += s[j], ++j) {}
return (sum + 8) >> 4;
}
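
Both averages round to nearest: (sum + 32) >> 6 for the 64-pixel block and (sum + 8) >> 4 for the 16-pixel block. A uniform block therefore averages to its own pixel value, since (16 * v + 8) >> 4 == v for any v in [0, 255]. A usage sketch (hypothetical buffer):

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void avg_uniform_block(void) {
  uint8_t block[4 * 4];
  memset(block, 200, sizeof(block));  /* 4x4 block, stride 4 */
  assert(vp10_avg_4x4_c(block, 4) == 200);
}
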
// src_diff: first pass, 9 bit, dynamic range [-255, 255]
// second pass, 12 bit, dynamic range [-2040, 2040]
static void hadamard_col8(const int16_t *src_diff, int src_stride,
int16_t *coeff) {
int16_t b0 = src_diff[0 * src_stride] + src_diff[1 * src_stride];
int16_t b1 = src_diff[0 * src_stride] - src_diff[1 * src_stride];
int16_t b2 = src_diff[2 * src_stride] + src_diff[3 * src_stride];
int16_t b3 = src_diff[2 * src_stride] - src_diff[3 * src_stride];
int16_t b4 = src_diff[4 * src_stride] + src_diff[5 * src_stride];
int16_t b5 = src_diff[4 * src_stride] - src_diff[5 * src_stride];
int16_t b6 = src_diff[6 * src_stride] + src_diff[7 * src_stride];
int16_t b7 = src_diff[6 * src_stride] - src_diff[7 * src_stride];
int16_t c0 = b0 + b2;
int16_t c1 = b1 + b3;
int16_t c2 = b0 - b2;
int16_t c3 = b1 - b3;
int16_t c4 = b4 + b6;
int16_t c5 = b5 + b7;
int16_t c6 = b4 - b6;
int16_t c7 = b5 - b7;
coeff[0] = c0 + c4;
coeff[7] = c1 + c5;
coeff[3] = c2 + c6;
coeff[4] = c3 + c7;
coeff[2] = c0 - c4;
coeff[6] = c1 - c5;
coeff[1] = c2 - c6;
coeff[5] = c3 - c7;
}
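
The three butterfly stages write their results in a permuted order (0, 7, 3, 4, 2, 6, 1, 5 rather than 0..7); since the downstream SATD only sums absolute values, the permutation is presumably harmless. One invariant is easy to verify: a constant input puts all of its energy into the DC term. A sketch, assuming hadamard_col8 is visible to the caller:

#include <assert.h>
#include <stdint.h>

static void check_dc_only(void) {
  int16_t in[8], out[8];
  int i;
  for (i = 0; i < 8; ++i) in[i] = 1;
  hadamard_col8(in, 1, out);  /* stride 1: reads in[0..7] */
  assert(out[0] == 8);        /* DC collects all eight inputs */
  for (i = 1; i < 8; ++i) assert(out[i] == 0);
}
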
void vp10_hadamard_8x8_c(int16_t const *src_diff, int src_stride,
int16_t *coeff) {
int idx;
int16_t buffer[64];
int16_t *tmp_buf = &buffer[0];
for (idx = 0; idx < 8; ++idx) {
hadamard_col8(src_diff, src_stride, tmp_buf); // src_diff: 9 bit
// dynamic range [-255, 255]
tmp_buf += 8;
++src_diff;
}
tmp_buf = &buffer[0];
for (idx = 0; idx < 8; ++idx) {
hadamard_col8(tmp_buf, 8, coeff); // tmp_buf: 12 bit
// dynamic range [-2040, 2040]
coeff += 8; // coeff: 15 bit
// dynamic range [-16320, 16320]
++tmp_buf;
}
}
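
The transform is separable: the first loop walks the eight input columns (note the ++src_diff) and writes each transformed column as a row of the temporary buffer; the second loop transforms that buffer's columns (stride 8) into coeff. In the encoder this kernel feeds the SATD below; a usage sketch (hypothetical wrapper):

#include <stdint.h>

static int block_satd_8x8(const int16_t *diff) {  /* 8x8 residual, stride 8 */
  int16_t coeff[64];
  vp10_hadamard_8x8_c(diff, 8, coeff);
  return vp10_satd_c(coeff, 64);
}
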
// In place 16x16 2D Hadamard transform
void vp10_hadamard_16x16_c(int16_t const *src_diff, int src_stride,
int16_t *coeff) {
int idx;
for (idx = 0; idx < 4; ++idx) {
// src_diff: 9 bit, dynamic range [-255, 255]
int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride
+ (idx & 0x01) * 8;
vp10_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64);
}
// coeff: 15 bit, dynamic range [-16320, 16320]
for (idx = 0; idx < 64; ++idx) {
int16_t a0 = coeff[0];
int16_t a1 = coeff[64];
int16_t a2 = coeff[128];
int16_t a3 = coeff[192];
int16_t b0 = (a0 + a1) >> 1; // (a0 + a1): 16 bit, [-32640, 32640]
int16_t b1 = (a0 - a1) >> 1; // b0-b3: 15 bit, dynamic range
int16_t b2 = (a2 + a3) >> 1; // [-16320, 16320]
int16_t b3 = (a2 - a3) >> 1;
coeff[0] = b0 + b2; // 16 bit, [-32640, 32640]
coeff[64] = b1 + b3;
coeff[128] = b0 - b2;
coeff[192] = b1 - b3;
++coeff;
}
}
// coeff: 16 bits, dynamic range [-32640, 32640].
// length: value range {16, 64, 256, 1024}.
int16_t vp10_satd_c(const int16_t *coeff, int length) {
int i;
int satd = 0;
for (i = 0; i < length; ++i)
satd += abs(coeff[i]);
// satd: 26 bits, dynamic range [-32640 * 1024, 32640 * 1024]
return (int16_t)satd;
}
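
One caveat (our observation, not part of the patch): the comment gives satd a 26-bit dynamic range, yet the return narrows to int16_t, so on the usual two's-complement targets any accumulated sum above 32767 wraps. For example:

#include <stdint.h>

static int satd_narrowing_demo(void) {
  const int sum = 64 * 600;    /* a plausible 8x8 accumulation: 38400 */
  return (int16_t)sum == sum;  /* 0: 38400 narrows to -27136 */
}
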
// Integer projection onto row vectors.
// height: value range {16, 32, 64}.
void vp10_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref,
const int ref_stride, const int height) {
int idx;
const int norm_factor = height >> 1;
for (idx = 0; idx < 16; ++idx) {
int i;
hbuf[idx] = 0;
// hbuf[idx]: 14 bit, dynamic range [0, 16320].
for (i = 0; i < height; ++i)
hbuf[idx] += ref[i * ref_stride];
// hbuf[idx]: 9 bit, dynamic range [0, 510].
hbuf[idx] /= norm_factor;
++ref;
}
}
// width: value range {16, 32, 64}.
int16_t vp10_int_pro_col_c(uint8_t const *ref, const int width) {
int idx;
int16_t sum = 0;
// sum: 14 bit, dynamic range [0, 16320]
for (idx = 0; idx < width; ++idx)
sum += ref[idx];
return sum;
}
// ref: [0 - 510]
// src: [0 - 510]
// bwl: {2, 3, 4}
int vp10_vector_var_c(int16_t const *ref, int16_t const *src,
const int bwl) {
int i;
int width = 4 << bwl;
int sse = 0, mean = 0, var;
for (i = 0; i < width; ++i) {
int diff = ref[i] - src[i]; // diff: dynamic range [-510, 510], 10 bits.
mean += diff; // mean: dynamic range 16 bits.
sse += diff * diff; // sse: dynamic range 26 bits.
}
// (mean * mean): dynamic range 31 bits.
var = sse - ((mean * mean) >> (bwl + 2));
return var;
}
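
Written out, this computes

  var = sum(d[i]^2) - (sum(d[i]))^2 / (4 << bwl),  d[i] = ref[i] - src[i]

which is width times the population variance of the differences, with width = 4 << bwl. Keeping the extra factor of width presumably avoids a second division while preserving the ordering the encoder cares about.
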
void vp10_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
int *min, int *max) {
int i, j;
*min = 255;
*max = 0;
for (i = 0; i < 8; ++i, s += p, d += dp) {
for (j = 0; j < 8; ++j) {
int diff = abs(s[j]-d[j]);
*min = diff < *min ? diff : *min;
*max = diff > *max ? diff : *max;
}
}
}
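
The kernel returns the minimum and maximum absolute source/prediction difference over an 8x8 block; the variance-based partitioning code further down (see compute_minmax_8x8) appears to use the spread between the two to spot blocks that mix flat and detailed content. A usage sketch (hypothetical wrapper):

#include <stdint.h>

static int minmax_spread_8x8(const uint8_t *src, int sp,
                             const uint8_t *pred, int dp) {
  int mn, mx;
  vp10_minmax_8x8_c(src, sp, pred, dp, &mn, &mx);
  return mx - mn;  /* 0 for a uniformly shifted block */
}
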
#if CONFIG_VP9_HIGHBITDEPTH
unsigned int vp10_highbd_avg_8x8_c(const uint8_t *s8, int p) {
int i, j;
int sum = 0;
const uint16_t* s = CONVERT_TO_SHORTPTR(s8);
for (i = 0; i < 8; ++i, s+=p)
for (j = 0; j < 8; sum += s[j], ++j) {}
return (sum + 32) >> 6;
}
unsigned int vp10_highbd_avg_4x4_c(const uint8_t *s8, int p) {
int i, j;
int sum = 0;
const uint16_t* s = CONVERT_TO_SHORTPTR(s8);
for (i = 0; i < 4; ++i, s+=p)
for (j = 0; j < 4; sum += s[j], ++j) {}
return (sum + 8) >> 4;
}
void vp10_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
int dp, int *min, int *max) {
int i, j;
const uint16_t* s = CONVERT_TO_SHORTPTR(s8);
const uint16_t* d = CONVERT_TO_SHORTPTR(d8);
*min = 255;
*max = 0;
for (i = 0; i < 8; ++i, s += p, d += dp) {
for (j = 0; j < 8; ++j) {
int diff = abs(s[j]-d[j]);
*min = diff < *min ? diff : *min;
*max = diff > *max ? diff : *max;
}
}
}
#endif // CONFIG_VP9_HIGHBITDEPTH
@@ -536,16 +536,16 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
if (x8_idx < pixels_wide && y8_idx < pixels_high) {
#if CONFIG_VP9_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
vp10_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
vpx_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
d + y8_idx * dp + x8_idx, dp,
&min, &max);
} else {
vp10_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
d + y8_idx * dp + x8_idx, dp,
&min, &max);
}
#else
vp10_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
vpx_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
d + y8_idx * dp + x8_idx, dp,
&min, &max);
#endif
@@ -577,18 +577,18 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
s_avg = vp10_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
s_avg = vpx_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
if (!is_key_frame)
d_avg = vp10_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
d_avg = vpx_highbd_avg_4x4(d + y4_idx * dp + x4_idx, dp);
} else {
s_avg = vp10_avg_4x4(s + y4_idx * sp + x4_idx, sp);
s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
if (!is_key_frame)
d_avg = vp10_avg_4x4(d + y4_idx * dp + x4_idx, dp);
d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
}
#else
s_avg = vp10_avg_4x4(s + y4_idx * sp + x4_idx, sp);
s_avg = vpx_avg_4x4(s + y4_idx * sp + x4_idx, sp);
if (!is_key_frame)
d_avg = vp10_avg_4x4(d + y4_idx * dp + x4_idx, dp);
d_avg = vpx_avg_4x4(d + y4_idx * dp + x4_idx, dp);
#endif
sum = s_avg - d_avg;
sse = sum * sum;
@@ -616,18 +616,18 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
int d_avg = 128;
#if CONFIG_VP9_HIGHBITDEPTH
if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
s_avg = vp10_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
s_avg = vpx_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);