Commit 72037944 authored by Dmitry Kovalev's avatar Dmitry Kovalev Committed by Gerrit Code Review

Merge "Removing variance MMX code."

parents 0e361fb8 12cd6f42
......@@ -485,21 +485,6 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(6, 5, subpel_avg_variance64x32_c),
make_tuple(6, 6, subpel_avg_variance64x64_c)));
#if HAVE_MMX
const vp9_variance_fn_t variance4x4_mmx = vp9_variance4x4_mmx;
const vp9_variance_fn_t variance8x8_mmx = vp9_variance8x8_mmx;
const vp9_variance_fn_t variance8x16_mmx = vp9_variance8x16_mmx;
const vp9_variance_fn_t variance16x8_mmx = vp9_variance16x8_mmx;
const vp9_variance_fn_t variance16x16_mmx = vp9_variance16x16_mmx;
INSTANTIATE_TEST_CASE_P(
MMX, VP9VarianceTest,
::testing::Values(make_tuple(2, 2, variance4x4_mmx),
make_tuple(3, 3, variance8x8_mmx),
make_tuple(3, 4, variance8x16_mmx),
make_tuple(4, 3, variance16x8_mmx),
make_tuple(4, 4, variance16x16_mmx)));
#endif
#if HAVE_SSE2
#if CONFIG_USE_X86INC
const vp9_variance_fn_t variance4x4_sse2 = vp9_variance4x4_sse2;
......
......@@ -420,19 +420,19 @@ add_proto qw/unsigned int vp9_variance64x64/, "const uint8_t *src_ptr, int sourc
specialize qw/vp9_variance64x64 avx2/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance16x16 mmx avx2 neon/, "$sse2_x86inc";
specialize qw/vp9_variance16x16 avx2 neon/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance16x8 mmx/, "$sse2_x86inc";
specialize qw/vp9_variance16x8/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance8x16 mmx/, "$sse2_x86inc";
specialize qw/vp9_variance8x16/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance8x8 mmx neon/, "$sse2_x86inc";
specialize qw/vp9_variance8x8 neon/, "$sse2_x86inc";
add_proto qw/void vp9_get8x8var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
specialize qw/vp9_get8x8var mmx neon/, "$sse2_x86inc";
specialize qw/vp9_get8x8var neon/, "$sse2_x86inc";
add_proto qw/void vp9_get16x16var/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse, int *sum";
specialize qw/vp9_get16x16var avx2 neon/, "$sse2_x86inc";
......@@ -444,7 +444,7 @@ add_proto qw/unsigned int vp9_variance4x8/, "const uint8_t *src_ptr, int source_
specialize qw/vp9_variance4x8/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_variance4x4 mmx/, "$sse2_x86inc";
specialize qw/vp9_variance4x4/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/vp9_sub_pixel_variance64x64 avx2/, "$sse2_x86inc", "$ssse3_x86inc";
......@@ -693,7 +693,7 @@ add_proto qw/void vp9_sad4x4x4d/, "const uint8_t *src_ptr, int src_stride, cons
specialize qw/vp9_sad4x4x4d sse/;
add_proto qw/unsigned int vp9_mse16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
specialize qw/vp9_mse16x16 mmx avx2/, "$sse2_x86inc";
specialize qw/vp9_mse16x16 avx2/, "$sse2_x86inc";
add_proto qw/unsigned int vp9_mse8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int recon_stride, unsigned int *sse";
specialize qw/vp9_mse8x16/;
......@@ -705,7 +705,7 @@ add_proto qw/unsigned int vp9_mse8x8/, "const uint8_t *src_ptr, int source_stri
specialize qw/vp9_mse8x8/;
add_proto qw/unsigned int vp9_get_mb_ss/, "const int16_t *";
specialize qw/vp9_get_mb_ss mmx sse2/;
specialize qw/vp9_get_mb_ss sse2/;
# ENCODEMB INVOKE
add_proto qw/int64_t vp9_block_error/, "const int16_t *coeff, const int16_t *dqcoeff, intptr_t block_size, int64_t *ssz";
......
This diff is collapsed.
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "./vpx_config.h"
#include "vp9/encoder/vp9_variance.h"
#include "vpx_ports/mem.h"
// Prototypes for the MMX assembly helpers (implemented in
// vp9_variance_impl_mmx.asm). Each computes, for one NxN block, the sum of
// squared differences (*sse) and the signed sum of differences (*sum)
// between src and ref; the return value is unused by the callers below.
unsigned int vp9_get8x8var_mmx(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse, int *sum);
unsigned int vp9_get4x4var_mmx(const uint8_t *src, int src_stride,
                               const uint8_t *ref, int ref_stride,
                               unsigned int *sse, int *sum);
// 4x4 variance: SSE minus the squared mean contribution (sum^2 / 16 pixels).
unsigned int vp9_variance4x4_mmx(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride,
                                 unsigned int *sse) {
  int diff_sum;
  vp9_get4x4var_mmx(src, src_stride, ref, ref_stride, sse, &diff_sum);
  // variance = SSE - sum^2 / N, with N = 16 (shift by 4).
  return *sse - (((unsigned int)diff_sum * diff_sum) >> 4);
}
// 8x8 variance: SSE minus the squared mean contribution (sum^2 / 64 pixels).
unsigned int vp9_variance8x8_mmx(const uint8_t *src, int src_stride,
                                 const uint8_t *ref, int ref_stride,
                                 unsigned int *sse) {
  int diff_sum;
  vp9_get8x8var_mmx(src, src_stride, ref, ref_stride, sse, &diff_sum);
  // variance = SSE - sum^2 / N, with N = 64 (shift by 6).
  return *sse - (((unsigned int)diff_sum * diff_sum) >> 6);
}
// 16x16 mean squared error: total SSE over the four 8x8 quadrants.
// The per-quadrant sums are required by the helper's signature but are
// not needed for MSE.
unsigned int vp9_mse16x16_mmx(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride,
                              unsigned int *sse) {
  unsigned int quad_sse[4];
  int quad_sum[4];
  // Top-left, top-right, bottom-left, bottom-right 8x8 quadrants.
  vp9_get8x8var_mmx(src, src_stride,
                    ref, ref_stride, &quad_sse[0], &quad_sum[0]);
  vp9_get8x8var_mmx(src + 8, src_stride,
                    ref + 8, ref_stride, &quad_sse[1], &quad_sum[1]);
  vp9_get8x8var_mmx(src + 8 * src_stride, src_stride,
                    ref + 8 * ref_stride, ref_stride,
                    &quad_sse[2], &quad_sum[2]);
  vp9_get8x8var_mmx(src + 8 * src_stride + 8, src_stride,
                    ref + 8 * ref_stride + 8, ref_stride,
                    &quad_sse[3], &quad_sum[3]);
  *sse = quad_sse[0] + quad_sse[1] + quad_sse[2] + quad_sse[3];
  return *sse;
}
// 16x16 variance, assembled from the four 8x8 quadrants:
// variance = total SSE - total_sum^2 / 256 (shift by 8).
unsigned int vp9_variance16x16_mmx(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   unsigned int *sse) {
  unsigned int quad_sse[4];
  int quad_sum[4];
  int total_sum;
  // Top-left, top-right, bottom-left, bottom-right 8x8 quadrants.
  vp9_get8x8var_mmx(src, src_stride,
                    ref, ref_stride, &quad_sse[0], &quad_sum[0]);
  vp9_get8x8var_mmx(src + 8, src_stride,
                    ref + 8, ref_stride, &quad_sse[1], &quad_sum[1]);
  vp9_get8x8var_mmx(src + 8 * src_stride, src_stride,
                    ref + 8 * ref_stride, ref_stride,
                    &quad_sse[2], &quad_sum[2]);
  vp9_get8x8var_mmx(src + 8 * src_stride + 8, src_stride,
                    ref + 8 * ref_stride + 8, ref_stride,
                    &quad_sse[3], &quad_sum[3]);
  *sse = quad_sse[0] + quad_sse[1] + quad_sse[2] + quad_sse[3];
  total_sum = quad_sum[0] + quad_sum[1] + quad_sum[2] + quad_sum[3];
  // |total_sum| <= 16*16*255 = 65280, so total_sum^2 fits in 32 bits.
  return *sse - (((unsigned int)total_sum * total_sum) >> 8);
}
// 16x8 variance from the left and right 8x8 halves:
// variance = total SSE - total_sum^2 / 128 (shift by 7).
unsigned int vp9_variance16x8_mmx(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  unsigned int *sse) {
  unsigned int half_sse[2];
  int half_sum[2];
  int total_sum;
  vp9_get8x8var_mmx(src, src_stride,
                    ref, ref_stride, &half_sse[0], &half_sum[0]);
  vp9_get8x8var_mmx(src + 8, src_stride,
                    ref + 8, ref_stride, &half_sse[1], &half_sum[1]);
  *sse = half_sse[0] + half_sse[1];
  total_sum = half_sum[0] + half_sum[1];
  return *sse - (((unsigned int)total_sum * total_sum) >> 7);
}
// 8x16 variance from the top and bottom 8x8 halves:
// variance = total SSE - total_sum^2 / 128 (shift by 7).
unsigned int vp9_variance8x16_mmx(const uint8_t *src, int src_stride,
                                  const uint8_t *ref, int ref_stride,
                                  unsigned int *sse) {
  unsigned int half_sse[2];
  int half_sum[2];
  int total_sum;
  vp9_get8x8var_mmx(src, src_stride,
                    ref, ref_stride, &half_sse[0], &half_sum[0]);
  vp9_get8x8var_mmx(src + 8 * src_stride, src_stride,
                    ref + 8 * ref_stride, ref_stride,
                    &half_sse[1], &half_sum[1]);
  *sse = half_sse[0] + half_sse[1];
  total_sum = half_sum[0] + half_sum[1];
  return *sse - (((unsigned int)total_sum * total_sum) >> 7);
}
......@@ -93,8 +93,6 @@ VP9_CX_SRCS-yes += encoder/vp9_temporal_filter.h
VP9_CX_SRCS-yes += encoder/vp9_mbgraph.c
VP9_CX_SRCS-yes += encoder/vp9_mbgraph.h
VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_mmx.c
VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_variance_impl_mmx.asm
VP9_CX_SRCS-$(HAVE_MMX) += encoder/x86/vp9_sad_mmx.asm
VP9_CX_SRCS-$(HAVE_SSE2) += encoder/x86/vp9_variance_impl_sse2.asm
VP9_CX_SRCS-$(HAVE_AVX2) += encoder/x86/vp9_variance_impl_intrin_avx2.c
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment