Commit 0f3c94e1 authored by David Barker, committed by Debargha Mukherjee

ext-inter: Delete dead code

Patches https://aomedia-review.googlesource.com/c/11987/
and https://aomedia-review.googlesource.com/c/11988/
replaced the old masked motion search pipeline with
a new one which uses different SAD/SSE functions.
This resulted in a lot of dead code.

This patch removes the now-dead code. Note that this
includes vectorized SAD/SSE functions, which will need
to be rewritten at some point for the new pipeline. It
also includes the masked_compound_variance_* functions
since these turned out not to be used by the new pipeline.

To help with the later addition of vectorized functions, the
masked_sad/variance_test.cc files are kept but are modified
to work with the new functions. The tests are then disabled
until we actually have the vectorized functions.

Change-Id: I61b686abd14bba5280bed94e1be62eb74ea23d89
parent 680b09ea
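
For context, the new pipeline's masked_compound_sad (its C reference is partially visible in the aom_dsp/sad.c hunks below) scores the source block against a mask-blended combination of two predictors. Here is a minimal C sketch of that behaviour, assuming AV1's 6-bit mask convention (weights in [0, 64]); the function name and the inline invert_mask handling are illustrative, not the library's actual code:

#include <stdlib.h>
#include <stdint.h>

// Sketch: SAD between src and the mask-weighted blend of predictors a and b.
// invert_mask swaps which predictor receives weight m versus (64 - m).
static unsigned int masked_compound_sad_sketch(
    const uint8_t *src, int src_stride, const uint8_t *a, int a_stride,
    const uint8_t *b, int b_stride, const uint8_t *m, int m_stride,
    int invert_mask, int width, int height) {
  unsigned int sad = 0;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      const int w = invert_mask ? 64 - m[x] : m[x];
      // Blend with 6 fractional bits, rounding to nearest.
      const int pred = (w * a[x] + (64 - w) * b[x] + 32) >> 6;
      sad += abs(src[x] - pred);
    }
    src += src_stride;
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  return sad;
}

Note that, unlike the deleted single-reference masked_sad below, no final (sad + 31) >> 6 step is needed: the blend itself divides by 64, so the accumulated differences are already at pixel scale.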
@@ -317,11 +317,6 @@ if (CONFIG_AV1_ENCODER)
"${AOM_ROOT}/aom_dsp/x86/variance_sse2.c"
"${AOM_ROOT}/aom_dsp/x86/sum_squares_sse2.c")
set(AOM_DSP_ENCODER_INTRIN_SSSE3
${AOM_DSP_ENCODER_INTRIN_SSSE3}
"${AOM_ROOT}/aom_dsp/x86/masked_sad_intrin_ssse3.c"
"${AOM_ROOT}/aom_dsp/x86/masked_variance_intrin_ssse3.c")
set(AOM_DSP_ENCODER_ASM_SSSE3_X86_64
${AOM_DSP_ENCODER_ASM_SSSE3_X86_64}
"${AOM_ROOT}/aom_dsp/x86/avg_ssse3_x86_64.asm"
@@ -342,10 +342,6 @@ DSP_SRCS-$(HAVE_AVX2) += x86/sad_highbd_avx2.c
endif
ifeq ($(CONFIG_AV1_ENCODER),yes)
ifeq ($(CONFIG_EXT_INTER),yes)
DSP_SRCS-$(HAVE_SSSE3) += x86/masked_sad_intrin_ssse3.c
DSP_SRCS-$(HAVE_SSSE3) += x86/masked_variance_intrin_ssse3.c
endif #CONFIG_EXT_INTER
ifeq ($(CONFIG_MOTION_VAR),yes)
DSP_SRCS-$(HAVE_SSE4_1) += x86/obmc_sad_sse4.c
DSP_SRCS-$(HAVE_SSE4_1) += x86/obmc_variance_sse4.c
@@ -738,17 +738,12 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
if (aom_config("CONFIG_EXT_INTER") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
specialize "aom_masked_sad${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_masked_compound_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
specialize "aom_highbd_masked_sad${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_highbd_masked_compound_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
}
}
@@ -1048,12 +1043,6 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
#
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
specialize "aom_masked_variance${w}x${h}", qw/ssse3/;
specialize "aom_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_masked_compound_variance${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *m, int m_stride, int invert_mask, unsigned int *sse";
add_proto qw/unsigned int/, "aom_masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
@@ -1061,12 +1050,6 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
foreach $bd ("_", "_10_", "_12_") {
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_highbd${bd}masked_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
specialize "aom_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
specialize "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_highbd${bd}masked_compound_variance${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *m, int m_stride, int invert_mask, unsigned int *sse";
add_proto qw/unsigned int/, "aom_highbd${bd}masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
}
@@ -312,30 +312,11 @@ highbd_sadMxNx4D(4, 4)
#if CONFIG_AV1 && CONFIG_EXT_INTER
static INLINE
unsigned int masked_sad(const uint8_t *a, int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m, int m_stride,
int width, int height) {
int y, x;
unsigned int sad = 0;
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) sad += m[x] * abs(a[x] - b[x]);
a += a_stride;
b += b_stride;
m += m_stride;
}
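// The mask has 6 fractional bits (values in [0, 64]), so round to nearest
// and normalize by shifting right by 6.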
sad = (sad + 31) >> 6;
return sad;
}
static INLINE unsigned int masked_compound_sad(const uint8_t *src,
int src_stride, const uint8_t *a,
int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m,
int m_stride, int width,
int height) {
unsigned int masked_compound_sad(const uint8_t *src, int src_stride,
const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
const uint8_t *m, int m_stride, int width,
int height) {
int y, x;
unsigned int sad = 0;
@@ -356,12 +337,6 @@ static INLINE unsigned int masked_compound_sad(const uint8_t *src,
}
#define MASKSADMxN(m, n) \
unsigned int aom_masked_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad(src, src_stride, ref, ref_stride, msk, msk_stride, m, \
n); \
} \
unsigned int aom_masked_compound_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
@@ -397,31 +372,11 @@ MASKSADMxN(4, 4)
#if CONFIG_HIGHBITDEPTH
static INLINE
unsigned int highbd_masked_sad(const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
const uint8_t *m, int m_stride, int width,
int height) {
int y, x;
unsigned int sad = 0;
const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) sad += m[x] * abs(a[x] - b[x]);
a += a_stride;
b += b_stride;
m += m_stride;
}
sad = (sad + 31) >> 6;
return sad;
}
static INLINE unsigned int highbd_masked_compound_sad(
const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, const uint8_t *m, int m_stride, int width,
int height) {
unsigned int highbd_masked_compound_sad(const uint8_t *src8, int src_stride,
const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride,
const uint8_t *m, int m_stride,
int width, int height) {
int y, x;
unsigned int sad = 0;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -445,12 +400,6 @@ static INLINE unsigned int highbd_masked_compound_sad(
}
#define HIGHBD_MASKSADMXN(m, n) \
unsigned int aom_highbd_masked_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad(src, src_stride, ref, ref_stride, msk, \
msk_stride, m, n); \
} \
unsigned int aom_highbd_masked_compound_sad##m##x##n##_c( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
@@ -55,26 +55,10 @@ typedef unsigned int (*aom_subp_avg_variance_fn_t)(
int b_stride, unsigned int *sse, const uint8_t *second_pred);
#if CONFIG_AV1 && CONFIG_EXT_INTER
typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
const uint8_t *msk_ptr,
int msk_stride);
typedef unsigned int (*aom_masked_variance_fn_t)(
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
const uint8_t *msk, int msk_stride, unsigned int *sse);
typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse);
typedef unsigned int (*aom_masked_compound_sad_fn_t)(
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
const uint8_t *second_pred, const uint8_t *msk, int msk_stride,
int invert_mask);
typedef unsigned int (*aom_masked_compound_variance_fn_t)(
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
const uint8_t *second_pred, const uint8_t *m, int m_stride, int invert_mask,
unsigned int *sse);
typedef unsigned int (*aom_masked_compound_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
@@ -106,12 +90,7 @@ typedef struct aom_variance_vtable {
aom_sad_multi_fn_t sdx8f;
aom_sad_multi_d_fn_t sdx4df;
#if CONFIG_EXT_INTER
aom_masked_sad_fn_t msdf;
aom_masked_variance_fn_t mvf;
aom_masked_subpixvariance_fn_t msvf;
aom_masked_compound_sad_fn_t mcsdf;
aom_masked_compound_variance_fn_t mcvf;
aom_masked_compound_subpixvariance_fn_t mcsvf;
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <stdlib.h>
#include <emmintrin.h>
#include <tmmintrin.h>
#include "aom_ports/mem.h"
#include "./aom_config.h"
#include "aom/aom_integer.h"
static INLINE __m128i width8_load_2rows(const uint8_t *ptr, int stride) {
__m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
__m128i temp2 = _mm_loadl_epi64((const __m128i *)(ptr + stride));
return _mm_unpacklo_epi64(temp1, temp2);
}
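// Load four 4-byte rows and pack them into a single 128-bit register.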
static INLINE __m128i width4_load_4rows(const uint8_t *ptr, int stride) {
__m128i temp1 = _mm_cvtsi32_si128(*(const uint32_t *)ptr);
__m128i temp2 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride));
__m128i temp3 = _mm_unpacklo_epi32(temp1, temp2);
temp1 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride * 2));
temp2 = _mm_cvtsi32_si128(*(const uint32_t *)(ptr + stride * 3));
temp1 = _mm_unpacklo_epi32(temp1, temp2);
return _mm_unpacklo_epi64(temp3, temp1);
}
static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride,
int width, int height);
static INLINE unsigned int masked_sad8xh_ssse3(
const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int height);
static INLINE unsigned int masked_sad4xh_ssse3(
const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int height);
#define MASKSADMXN_SSSE3(m, n) \
unsigned int aom_masked_sad##m##x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, msk_stride, \
m, n); \
}
#if CONFIG_EXT_PARTITION
MASKSADMXN_SSSE3(128, 128)
MASKSADMXN_SSSE3(128, 64)
MASKSADMXN_SSSE3(64, 128)
#endif // CONFIG_EXT_PARTITION
MASKSADMXN_SSSE3(64, 64)
MASKSADMXN_SSSE3(64, 32)
MASKSADMXN_SSSE3(32, 64)
MASKSADMXN_SSSE3(32, 32)
MASKSADMXN_SSSE3(32, 16)
MASKSADMXN_SSSE3(16, 32)
MASKSADMXN_SSSE3(16, 16)
MASKSADMXN_SSSE3(16, 8)
#define MASKSAD8XN_SSSE3(n) \
unsigned int aom_masked_sad8x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, msk, \
msk_stride, n); \
}
MASKSAD8XN_SSSE3(16)
MASKSAD8XN_SSSE3(8)
MASKSAD8XN_SSSE3(4)
#define MASKSAD4XN_SSSE3(n) \
unsigned int aom_masked_sad4x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
msk_stride, n); \
}
MASKSAD4XN_SSSE3(8)
MASKSAD4XN_SSSE3(4)
// For width a multiple of 16
// Assumes values in m are <=64
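// (With m <= 64 and 8-bit differences, each pair sum inside
// _mm_maddubs_epi16 is at most 2 * 64 * 255 = 32640, so its saturating
// 16-bit result never clips.)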
static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride,
int width, int height) {
int y, x;
__m128i a, b, m, temp1, temp2;
__m128i res = _mm_setzero_si128();
__m128i one = _mm_set1_epi16(1);
// For each row
for (y = 0; y < height; y++) {
// Covering the full width
for (x = 0; x < width; x += 16) {
// Load a, b, m in xmm registers
a = _mm_loadu_si128((const __m128i *)(a_ptr + x));
b = _mm_loadu_si128((const __m128i *)(b_ptr + x));
m = _mm_loadu_si128((const __m128i *)(m_ptr + x));
// Calculate the difference between a & b
temp1 = _mm_subs_epu8(a, b);
temp2 = _mm_subs_epu8(b, a);
temp1 = _mm_or_si128(temp1, temp2);
// Multiply by m and add together
temp2 = _mm_maddubs_epi16(temp1, m);
// Pad out row result to 32 bit integers & add to running total
res = _mm_add_epi32(res, _mm_madd_epi16(temp2, one));
}
// Move onto the next row
a_ptr += a_stride;
b_ptr += b_stride;
m_ptr += m_stride;
}
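// Reduce the four 32-bit partial sums to a single total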
res = _mm_hadd_epi32(res, _mm_setzero_si128());
res = _mm_hadd_epi32(res, _mm_setzero_si128());
// sad = (sad + 31) >> 6;
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
static INLINE unsigned int masked_sad8xh_ssse3(
const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int height) {
int y;
__m128i a, b, m, temp1, temp2, row_res;
__m128i res = _mm_setzero_si128();
__m128i one = _mm_set1_epi16(1);
// Add the masked SAD for 2 rows at a time
for (y = 0; y < height; y += 2) {
// Load a, b, m in xmm registers
a = width8_load_2rows(a_ptr, a_stride);
b = width8_load_2rows(b_ptr, b_stride);
m = width8_load_2rows(m_ptr, m_stride);
// Calculate the difference between a & b
temp1 = _mm_subs_epu8(a, b);
temp2 = _mm_subs_epu8(b, a);
temp1 = _mm_or_si128(temp1, temp2);
// Multiply by m and add together
row_res = _mm_maddubs_epi16(temp1, m);
// Pad out row result to 32 bit integers & add to running total
res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
// Move onto the next rows
a_ptr += a_stride * 2;
b_ptr += b_stride * 2;
m_ptr += m_stride * 2;
}
res = _mm_hadd_epi32(res, _mm_setzero_si128());
res = _mm_hadd_epi32(res, _mm_setzero_si128());
// sad = (sad + 31) >> 6;
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
static INLINE unsigned int masked_sad4xh_ssse3(
const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int height) {
int y;
__m128i a, b, m, temp1, temp2, row_res;
__m128i res = _mm_setzero_si128();
__m128i one = _mm_set1_epi16(1);
// Add the masked SAD for 4 rows at a time
for (y = 0; y < height; y += 4) {
// Load a, b, m in xmm registers
a = width4_load_4rows(a_ptr, a_stride);
b = width4_load_4rows(b_ptr, b_stride);
m = width4_load_4rows(m_ptr, m_stride);
// Calculate the difference between a & b
temp1 = _mm_subs_epu8(a, b);
temp2 = _mm_subs_epu8(b, a);
temp1 = _mm_or_si128(temp1, temp2);
// Multiply by m and add together
row_res = _mm_maddubs_epi16(temp1, m);
// Pad out row result to 32 bit integers & add to running total
res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
// Move onto the next rows
a_ptr += a_stride * 4;
b_ptr += b_stride * 4;
m_ptr += m_stride * 4;
}
// Reduce the four 32-bit partial sums to a single total
res = _mm_hadd_epi32(res, _mm_setzero_si128());
res = _mm_hadd_epi32(res, _mm_setzero_si128());
// sad = (sad + 31) >> 6;
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
#if CONFIG_HIGHBITDEPTH
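// Load two 4-pixel (8-byte) high-bitdepth rows into one 128-bit register.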
static INLINE __m128i highbd_width4_load_2rows(const uint16_t *ptr,
int stride) {
__m128i temp1 = _mm_loadl_epi64((const __m128i *)ptr);
__m128i temp2 = _mm_loadl_epi64((const __m128i *)(ptr + stride));
return _mm_unpacklo_epi64(temp1, temp2);
}
static INLINE unsigned int highbd_masked_sad_ssse3(
const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int width, int height);
static INLINE unsigned int highbd_masked_sad4xh_ssse3(
const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int height);
#define HIGHBD_MASKSADMXN_SSSE3(m, n) \
unsigned int aom_highbd_masked_sad##m##x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, \
msk_stride, m, n); \
}
#if CONFIG_EXT_PARTITION
HIGHBD_MASKSADMXN_SSSE3(128, 128)
HIGHBD_MASKSADMXN_SSSE3(128, 64)
HIGHBD_MASKSADMXN_SSSE3(64, 128)
#endif // CONFIG_EXT_PARTITION
HIGHBD_MASKSADMXN_SSSE3(64, 64)
HIGHBD_MASKSADMXN_SSSE3(64, 32)
HIGHBD_MASKSADMXN_SSSE3(32, 64)
HIGHBD_MASKSADMXN_SSSE3(32, 32)
HIGHBD_MASKSADMXN_SSSE3(32, 16)
HIGHBD_MASKSADMXN_SSSE3(16, 32)
HIGHBD_MASKSADMXN_SSSE3(16, 16)
HIGHBD_MASKSADMXN_SSSE3(16, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 16)
HIGHBD_MASKSADMXN_SSSE3(8, 8)
HIGHBD_MASKSADMXN_SSSE3(8, 4)
#define HIGHBD_MASKSAD4XN_SSSE3(n) \
unsigned int aom_highbd_masked_sad4x##n##_ssse3( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
msk_stride, n); \
}
HIGHBD_MASKSAD4XN_SSSE3(8)
HIGHBD_MASKSAD4XN_SSSE3(4)
// For width a multiple of 8
// Assumes values in m are <=64
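// (The 16-bit differences and masks go through _mm_madd_epi16, which widens
// each pair to 32 bits, so products up to 4095 * 64 for 12-bit input cannot
// overflow.)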
static INLINE unsigned int highbd_masked_sad_ssse3(
const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int width, int height) {
int y, x;
__m128i a, b, m, temp1, temp2;
const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
__m128i res = _mm_setzero_si128();
// For each row
for (y = 0; y < height; y++) {
// Covering the full width
for (x = 0; x < width; x += 8) {
// Load a, b, m in xmm registers
a = _mm_loadu_si128((const __m128i *)(a_ptr + x));
b = _mm_loadu_si128((const __m128i *)(b_ptr + x));
m = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(m_ptr + x)),
_mm_setzero_si128());
// Calculate the difference between a & b
temp1 = _mm_subs_epu16(a, b);
temp2 = _mm_subs_epu16(b, a);
temp1 = _mm_or_si128(temp1, temp2);
// Add result of multiplying by m and add pairs together to running total
res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
}
// Move onto the next row
a_ptr += a_stride;
b_ptr += b_stride;
m_ptr += m_stride;
}
res = _mm_hadd_epi32(res, _mm_setzero_si128());
res = _mm_hadd_epi32(res, _mm_setzero_si128());
// sad = (sad + 31) >> 6;
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
static INLINE unsigned int highbd_masked_sad4xh_ssse3(
const uint8_t *a8_ptr, int a_stride, const uint8_t *b8_ptr, int b_stride,
const uint8_t *m_ptr, int m_stride, int height) {
int y;
__m128i a, b, m, temp1, temp2;
const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
__m128i res = _mm_setzero_si128();
// Add the masked SAD for 2 rows at a time
for (y = 0; y < height; y += 2) {
// Load a, b, m in xmm registers
a = highbd_width4_load_2rows(a_ptr, a_stride);
b = highbd_width4_load_2rows(b_ptr, b_stride);
temp1 = _mm_loadl_epi64((const __m128i *)m_ptr);
temp2 = _mm_loadl_epi64((const __m128i *)(m_ptr + m_stride));
m = _mm_unpacklo_epi8(_mm_unpacklo_epi32(temp1, temp2),
_mm_setzero_si128());
// Calculate the difference between a & b
temp1 = _mm_subs_epu16(a, b);
temp2 = _mm_subs_epu16(b, a);
temp1 = _mm_or_si128(temp1, temp2);
// Multiply by m and add together
res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
// Move onto the next rows
a_ptr += a_stride * 2;
b_ptr += b_stride * 2;
m_ptr += m_stride * 2;
}
res = _mm_hadd_epi32(res, _mm_setzero_si128());
res = _mm_hadd_epi32(res, _mm_setzero_si128());
// sad = (sad + 31) >> 6;
return (_mm_cvtsi128_si32(res) + 31) >> 6;
}
#endif // CONFIG_HIGHBITDEPTH
@@ -136,27 +136,6 @@ int av1_full_pixel_search(const struct AV1_COMP *cpi, MACROBLOCK *x,
int error_per_bit, int *cost_list, const MV *ref_mv,
int var_max, int rd);
#if CONFIG_EXT_INTER
int av1_find_best_masked_sub_pixel_tree(
const MACROBLOCK *x, const uint8_t *mask, int mask_stride, MV *bestmv,
const MV *ref_mv, int allow_hp, int error_per_bit,
const aom_variance_fn_ptr_t *vfp, int forced_stop, int iters_per_step,
int *mvjcost, int *mvcost[2], int *distortion, unsigned int *sse1,
int is_second);
int av1_find_best_masked_sub_pixel_tree_up(
const struct AV1_COMP *cpi, MACROBLOCK *x, const uint8_t *mask,
int mask_stride, int mi_row, int mi_col, MV *bestmv, const MV *ref_mv,
int allow_hp, int error_per_bit, const aom_variance_fn_ptr_t *vfp,
int forced_stop, int iters_per_step, int *mvjcost, int *mvcost[2],
int *distortion, unsigned int *sse1, int is_second, int use_upsampled_ref);
int av1_masked_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
const uint8_t *mask, int mask_stride,
MV *mvp_full, int step_param, int sadpb,
int further_steps, int do_refine,
const aom_variance_fn_ptr_t *fn_ptr,
const MV *ref_mv, MV *dst_mv, int is_second);
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
int av1_obmc_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
MV *mvp_full, int step_param, int sadpb,
@@ -27,9 +27,11 @@ using libaom_test::ACMRandom;
namespace {
const int number_of_iterations = 500;
typedef unsigned int (*MaskedSADFunc)(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
const uint8_t *m, int m_stride);
typedef unsigned int (*MaskedSADFunc)(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
const uint8_t *second_pred,
const uint8_t *msk, int msk_stride,
int invert_mask);
typedef std::tr1::tuple<MaskedSADFunc, MaskedSADFunc> MaskedSADParam;
class MaskedSADTest : public ::testing::TestWithParam<MaskedSADParam> {
@@ -52,6 +54,7 @@ TEST_P(MaskedSADTest, OperationCheck) {
ACMRandom