Commit f19f35f7 authored by David Barker, committed by Debargha Mukherjee

ext-inter: Further cleanup

* Rename the 'masked_compound_*' functions to just 'masked_*'.
  The previous names were intended to be temporary, to distinguish
  the old and new masked motion search pipelines. But now that the
  old pipeline has been removed, we can reuse the old names.

* Simplify the new ext-inter compound motion search pipeline
  a bit.

* Harmonize names: Rename
  aom_highbd_masked_compound_sub_pixel_variance* to
  aom_highbd_8_masked_sub_pixel_variance*, to match the naming of
  the corresponding non-masked functions.

Change-Id: I988768ffe2f42a942405b7d8e93a2757a012dca3
parent 73826033
@@ -740,13 +740,13 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
if (aom_config("CONFIG_EXT_INTER") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_masked_compound_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_highbd_masked_compound_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
}
}
}
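Each add_proto line above registers one RTCD entry point. As a rough, hedged sketch of what the generated aom_dsp_rtcd.h then contains for one block size (the generator's exact output is assumed, not shown in this diff):

#include <stdint.h>

/* Sketch: prototype emitted for the renamed SAD entry point above. */
unsigned int aom_masked_sad16x16_c(const uint8_t *src, int src_stride,
                                   const uint8_t *ref, int ref_stride,
                                   const uint8_t *second_pred,
                                   const uint8_t *msk, int msk_stride,
                                   int invert_mask);

/* With no SIMD specialization registered, the dispatch name is assumed
 * to resolve straight to the C implementation. */
#define aom_masked_sad16x16 aom_masked_sad16x16_c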
@@ -1045,14 +1045,14 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
#
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
foreach $bd ("_", "_10_", "_12_") {
foreach $bd ("_8_", "_10_", "_12_") {
foreach (@block_sizes) {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_highbd${bd}masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
}
}
......
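With the $bd loop changed from "_" to "_8_", the generated high-bitdepth names become uniform across bit depths, as the commit message notes. For example, for a 16x16 block the three variants (all sharing the signature registered above) are now:

unsigned int aom_highbd_8_masked_sub_pixel_variance16x16_c(
    const uint8_t *src, int src_stride, int xoffset, int yoffset,
    const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
    const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);
unsigned int aom_highbd_10_masked_sub_pixel_variance16x16_c(
    const uint8_t *src, int src_stride, int xoffset, int yoffset,
    const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
    const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);
unsigned int aom_highbd_12_masked_sub_pixel_variance16x16_c(
    const uint8_t *src, int src_stride, int xoffset, int yoffset,
    const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
    const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);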
@@ -312,11 +312,10 @@ highbd_sadMxNx4D(4, 4)
#if CONFIG_AV1 && CONFIG_EXT_INTER
static INLINE
-unsigned int masked_compound_sad(const uint8_t *src, int src_stride,
-const uint8_t *a, int a_stride,
-const uint8_t *b, int b_stride,
-const uint8_t *m, int m_stride, int width,
-int height) {
+unsigned int masked_sad(const uint8_t *src, int src_stride,
+const uint8_t *a, int a_stride, const uint8_t *b,
+int b_stride, const uint8_t *m, int m_stride,
+int width, int height) {
int y, x;
unsigned int sad = 0;
@@ -336,17 +335,17 @@ highbd_sadMxNx4D(4, 4)
return sad;
}
-#define MASKSADMxN(m, n) \
-unsigned int aom_masked_compound_sad##m##x##n##_c( \
-const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
-const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
-int invert_mask) { \
-if (!invert_mask) \
-return masked_compound_sad(src, src_stride, ref, ref_stride, \
-second_pred, m, msk, msk_stride, m, n); \
-else \
-return masked_compound_sad(src, src_stride, second_pred, m, ref, \
-ref_stride, msk, msk_stride, m, n); \
+#define MASKSADMxN(m, n) \
+unsigned int aom_masked_sad##m##x##n##_c( \
+const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
+const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
+int invert_mask) { \
+if (!invert_mask) \
+return masked_sad(src, src_stride, ref, ref_stride, second_pred, m, msk, \
+msk_stride, m, n); \
+else \
+return masked_sad(src, src_stride, second_pred, m, ref, ref_stride, msk, \
+msk_stride, m, n); \
}
/* clang-format off */
@@ -372,11 +371,11 @@ MASKSADMxN(4, 4)
#if CONFIG_HIGHBITDEPTH
static INLINE
-unsigned int highbd_masked_compound_sad(const uint8_t *src8, int src_stride,
-const uint8_t *a8, int a_stride,
-const uint8_t *b8, int b_stride,
-const uint8_t *m, int m_stride,
-int width, int height) {
+unsigned int highbd_masked_sad(const uint8_t *src8, int src_stride,
+const uint8_t *a8, int a_stride,
+const uint8_t *b8, int b_stride,
+const uint8_t *m, int m_stride, int width,
+int height) {
int y, x;
unsigned int sad = 0;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
@@ -399,19 +398,17 @@ MASKSADMxN(4, 4)
return sad;
}
-#define HIGHBD_MASKSADMXN(m, n) \
-unsigned int aom_highbd_masked_compound_sad##m##x##n##_c( \
-const uint8_t *src8, int src_stride, const uint8_t *ref8, \
-int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
-int msk_stride, int invert_mask) { \
-if (!invert_mask) \
-return highbd_masked_compound_sad(src8, src_stride, ref8, ref_stride, \
-second_pred8, m, msk, msk_stride, m, \
-n); \
-else \
-return highbd_masked_compound_sad(src8, src_stride, second_pred8, m, \
-ref8, ref_stride, msk, msk_stride, m, \
-n); \
+#define HIGHBD_MASKSADMXN(m, n) \
+unsigned int aom_highbd_masked_sad##m##x##n##_c( \
+const uint8_t *src8, int src_stride, const uint8_t *ref8, \
+int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
+int msk_stride, int invert_mask) { \
+if (!invert_mask) \
+return highbd_masked_sad(src8, src_stride, ref8, ref_stride, \
+second_pred8, m, msk, msk_stride, m, n); \
+else \
+return highbd_masked_sad(src8, src_stride, second_pred8, m, ref8, \
+ref_stride, msk, msk_stride, m, n); \
}
#if CONFIG_EXT_PARTITION
......
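The loop bodies elided above blend the two predictors under the mask before accumulating the SAD. A minimal sketch of that computation, assuming libaom's usual 64-weight (AOM_BLEND_A64-style) convention; the helper name is illustrative:

#include <stdint.h>
#include <stdlib.h> /* abs() */

/* Sketch (assumption): what one masked SAD loop computes. m[] holds
 * 6-bit weights, so a and b are blended as
 * (m * a + (64 - m) * b + 32) >> 6 before the absolute difference. */
static unsigned int masked_sad_sketch(const uint8_t *src, int src_stride,
                                      const uint8_t *a, int a_stride,
                                      const uint8_t *b, int b_stride,
                                      const uint8_t *m, int m_stride,
                                      int width, int height) {
  unsigned int sad = 0;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      const int pred = (m[x] * a[x] + (64 - m[x]) * b[x] + 32) >> 6;
      sad += abs(src[x] - pred);
    }
    src += src_stride;
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  return sad;
}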
@@ -714,24 +714,24 @@ void aom_comp_mask_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
}
}
-#define MASK_SUBPIX_VAR(W, H) \
-unsigned int aom_masked_compound_sub_pixel_variance##W##x##H##_c( \
-const uint8_t *src, int src_stride, int xoffset, int yoffset, \
-const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
-const uint8_t *msk, int msk_stride, int invert_mask, \
-unsigned int *sse) { \
-uint16_t fdata3[(H + 1) * W]; \
-uint8_t temp2[H * W]; \
-DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
-\
-var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
-bilinear_filters_2t[xoffset]); \
-var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-bilinear_filters_2t[yoffset]); \
-\
-aom_comp_mask_pred(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
-invert_mask); \
-return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
+#define MASK_SUBPIX_VAR(W, H) \
+unsigned int aom_masked_sub_pixel_variance##W##x##H##_c( \
+const uint8_t *src, int src_stride, int xoffset, int yoffset, \
+const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
+const uint8_t *msk, int msk_stride, int invert_mask, \
+unsigned int *sse) { \
+uint16_t fdata3[(H + 1) * W]; \
+uint8_t temp2[H * W]; \
+DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
+\
+var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
+bilinear_filters_2t[xoffset]); \
+var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+bilinear_filters_2t[yoffset]); \
+\
+aom_comp_mask_pred_c(temp3, second_pred, W, H, temp2, W, msk, msk_stride, \
+invert_mask); \
+return aom_variance##W##x##H##_c(temp3, W, ref, ref_stride, sse); \
}
MASK_SUBPIX_VAR(4, 4)
@@ -800,7 +800,7 @@ void aom_highbd_comp_mask_upsampled_pred_c(uint16_t *comp_pred,
}
#define HIGHBD_MASK_SUBPIX_VAR(W, H) \
-unsigned int aom_highbd_masked_compound_sub_pixel_variance##W##x##H##_c( \
+unsigned int aom_highbd_8_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
const uint8_t *msk, int msk_stride, int invert_mask, \
@@ -822,7 +822,7 @@ void aom_highbd_comp_mask_upsampled_pred_c(uint16_t *comp_pred,
ref, ref_stride, sse); \
} \
\
-unsigned int aom_highbd_10_masked_compound_sub_pixel_variance##W##x##H##_c( \
+unsigned int aom_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
const uint8_t *msk, int msk_stride, int invert_mask, \
@@ -844,7 +844,7 @@ void aom_highbd_comp_mask_upsampled_pred_c(uint16_t *comp_pred,
ref, ref_stride, sse); \
} \
\
-unsigned int aom_highbd_12_masked_compound_sub_pixel_variance##W##x##H##_c( \
+unsigned int aom_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
const uint8_t *src, int src_stride, int xoffset, int yoffset, \
const uint8_t *ref, int ref_stride, const uint8_t *second_pred, \
const uint8_t *msk, int msk_stride, int invert_mask, \
......
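For orientation, the MASK_SUBPIX_VAR / HIGHBD_MASK_SUBPIX_VAR pipeline above is: two-pass 2-tap bilinear interpolation to the requested sub-pel offset, a mask blend against second_pred, then a plain variance. A sketch of the 2-tap kernels it indexes, assuming the same 1/8-pel, sum-to-128 layout as the non-masked variance code (the names here are illustrative):

#include <stdint.h>

/* Sketch (assumption): bilinear_filters_2t as used by
 * var_filter_block2d_bil_{first,second}_pass. Each pair sums to
 * 128 (1 << FILTER_BITS), indexed by the 3-bit sub-pel offset. */
static const uint8_t bilinear_filters_2t_sketch[8][2] = {
  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
  { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 },
};

/* One filtered pixel: out = (p0 * f[0] + p1 * f[1] + 64) >> 7. */
static uint8_t bil2t_pixel(uint8_t p0, uint8_t p1, const uint8_t f[2]) {
  return (uint8_t)((p0 * f[0] + p1 * f[1] + 64) >> 7);
}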
@@ -55,11 +55,12 @@ typedef unsigned int (*aom_subp_avg_variance_fn_t)(
int b_stride, unsigned int *sse, const uint8_t *second_pred);
#if CONFIG_AV1 && CONFIG_EXT_INTER
-typedef unsigned int (*aom_masked_compound_sad_fn_t)(
-const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
-const uint8_t *second_pred, const uint8_t *msk, int msk_stride,
-int invert_mask);
-typedef unsigned int (*aom_masked_compound_subpixvariance_fn_t)(
+typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
+const uint8_t *ref, int ref_stride,
+const uint8_t *second_pred,
+const uint8_t *msk, int msk_stride,
+int invert_mask);
+typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);
@@ -90,8 +91,8 @@ typedef struct aom_variance_vtable {
aom_sad_multi_fn_t sdx8f;
aom_sad_multi_d_fn_t sdx4df;
#if CONFIG_EXT_INTER
-aom_masked_compound_sad_fn_t mcsdf;
-aom_masked_compound_subpixvariance_fn_t mcsvf;
+aom_masked_sad_fn_t msdf;
+aom_masked_subpixvariance_fn_t msvf;
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
aom_obmc_sad_fn_t osdf;
......
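The renamed function-pointer fields are filled in per block size on the encoder side. A hypothetical wiring sketch (the table variable and wiring function are illustrative; only the field names msdf/msvf and the kernel names come from this diff):

/* Assumes aom_dsp/variance.h (for struct aom_variance_vtable) and the
 * RTCD header (for the kernels) are included. */
static struct aom_variance_vtable fn_16x16;

static void wire_masked_fns(void) {
  fn_16x16.msdf = aom_masked_sad16x16;
  fn_16x16.msvf = aom_masked_sub_pixel_variance16x16;
}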
@@ -177,30 +177,30 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
/* checks if (r, c) has better score than previous best */
#if CONFIG_EXT_INTER
-#define CHECK_BETTER(v, r, c) \
-if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
-MV this_mv = { r, c }; \
-v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
-if (second_pred == NULL) \
-thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
-src_address, src_stride, &sse); \
-else if (mask) \
-thismse = vfp->mcsvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
-src_address, src_stride, second_pred, mask, \
-mask_stride, invert_mask, &sse); \
-else \
-thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
-src_address, src_stride, &sse, second_pred); \
-v += thismse; \
-if (v < besterr) { \
-besterr = v; \
-br = r; \
-bc = c; \
-*distortion = thismse; \
-*sse1 = sse; \
-} \
-} else { \
-v = INT_MAX; \
+#define CHECK_BETTER(v, r, c) \
+if (c >= minc && c <= maxc && r >= minr && r <= maxr) { \
+MV this_mv = { r, c }; \
+v = mv_err_cost(&this_mv, ref_mv, mvjcost, mvcost, error_per_bit); \
+if (second_pred == NULL) \
+thismse = vfp->svf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+src_address, src_stride, &sse); \
+else if (mask) \
+thismse = vfp->msvf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+src_address, src_stride, second_pred, mask, \
+mask_stride, invert_mask, &sse); \
+else \
+thismse = vfp->svaf(pre(y, y_stride, r, c), y_stride, sp(c), sp(r), \
+src_address, src_stride, &sse, second_pred); \
+v += thismse; \
+if (v < besterr) { \
+besterr = v; \
+br = r; \
+bc = c; \
+*distortion = thismse; \
+*sse1 = sse; \
+} \
+} else { \
+v = INT_MAX; \
}
#else
#define CHECK_BETTER(v, r, c) \
@@ -224,7 +224,7 @@ static INLINE const uint8_t *pre(const uint8_t *buf, int stride, int r, int c) {
} else { \
v = INT_MAX; \
}
-#endif
+#endif  // CONFIG_EXT_INTER
#define CHECK_BETTER0(v, r, c) CHECK_BETTER(v, r, c)
@@ -272,7 +272,7 @@ static INLINE const uint8_t *upre(const uint8_t *buf, int stride, int r,
} else { \
v = INT_MAX; \
}
-#endif
+#endif  // CONFIG_EXT_INTER
#define FIRST_LEVEL_CHECKS \
{ \
@@ -861,9 +861,9 @@ int av1_find_best_sub_pixel_tree(
src_address, src_stride, &sse);
#if CONFIG_EXT_INTER
else if (mask)
-thismse = vfp->mcsvf(pre_address, y_stride, sp(tc), sp(tr),
-src_address, src_stride, second_pred, mask,
-mask_stride, invert_mask, &sse);
+thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+src_address, src_stride, second_pred, mask,
+mask_stride, invert_mask, &sse);
#endif
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
@@ -910,9 +910,9 @@ int av1_find_best_sub_pixel_tree(
src_stride, &sse);
#if CONFIG_EXT_INTER
else if (mask)
-thismse = vfp->mcsvf(pre_address, y_stride, sp(tc), sp(tr),
-src_address, src_stride, second_pred, mask,
-mask_stride, invert_mask, &sse);
+thismse = vfp->msvf(pre_address, y_stride, sp(tc), sp(tr),
+src_address, src_stride, second_pred, mask,
+mask_stride, invert_mask, &sse);
#endif
else
thismse = vfp->svaf(pre_address, y_stride, sp(tc), sp(tr),
@@ -1389,9 +1389,9 @@ int av1_get_mvpred_mask_var(const MACROBLOCK *x, const MV *best_mv,
const MV mv = { best_mv->row * 8, best_mv->col * 8 };
unsigned int unused;
-return vfp->mcsvf(what->buf, what->stride, 0, 0,
-get_buf_from_mv(in_what, best_mv), in_what->stride,
-second_pred, mask, mask_stride, invert_mask, &unused) +
+return vfp->msvf(what->buf, what->stride, 0, 0,
+get_buf_from_mv(in_what, best_mv), in_what->stride,
+second_pred, mask, mask_stride, invert_mask, &unused) +
(use_mvcost ? mv_err_cost(&mv, center_mv, x->nmvjointcost, x->mvcost,
x->errorperbit)
: 0);
@@ -2362,7 +2362,7 @@ int av1_refining_search_sad(MACROBLOCK *x, MV *ref_mv, int error_per_bit,
}
// This function is called when we do joint motion search in comp_inter_inter
-// mode.
+// mode, or when searching for one component of an ext-inter compound mode.
int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
#if CONFIG_EXT_INTER
@@ -2384,9 +2384,9 @@ int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
x->mv_limits.row_min, x->mv_limits.row_max);
#if CONFIG_EXT_INTER
if (mask)
-best_sad = fn_ptr->mcsdf(what->buf, what->stride,
-get_buf_from_mv(in_what, best_mv), in_what->stride,
-second_pred, mask, mask_stride, invert_mask) +
+best_sad = fn_ptr->msdf(what->buf, what->stride,
+get_buf_from_mv(in_what, best_mv), in_what->stride,
+second_pred, mask, mask_stride, invert_mask) +
mvsad_err_cost(x, best_mv, &fcenter_mv, error_per_bit);
else
#endif
@@ -2406,9 +2406,9 @@ int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
unsigned int sad;
#if CONFIG_EXT_INTER
if (mask)
-sad = fn_ptr->mcsdf(what->buf, what->stride,
-get_buf_from_mv(in_what, &mv), in_what->stride,
-second_pred, mask, mask_stride, invert_mask);
+sad = fn_ptr->msdf(what->buf, what->stride,
+get_buf_from_mv(in_what, &mv), in_what->stride,
+second_pred, mask, mask_stride, invert_mask);
else
#endif
sad = fn_ptr->sdaf(what->buf, what->stride,
......
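As the msdf/msvf call sites above show, compound search scores one component at a time; the invert_mask flag lets the same kernel handle either component by swapping which predictor the mask weights. A sketch of that call convention, as implied by the MASKSADMxN expansion earlier in this diff (the function and variable names here are illustrative):

/* Sketch: scoring each component of a compound prediction with the
 * same masked-SAD kernel. Assumes the aom_variance_fn_ptr_t type from
 * this diff is in scope. */
static void score_components(const aom_variance_fn_ptr_t *fn_ptr,
                             const uint8_t *src, int src_stride,
                             const uint8_t *pred0, const uint8_t *pred1,
                             int pred_stride, const uint8_t *mask,
                             int mask_stride, unsigned int *sad0,
                             unsigned int *sad1) {
  /* Component 0: the mask weights the first predictor directly. */
  *sad0 = fn_ptr->msdf(src, src_stride, pred0, pred_stride, pred1, mask,
                       mask_stride, /*invert_mask=*/0);
  /* Component 1: invert_mask makes the kernel swap its operands, so the
   * mask effectively weights the other predictor. */
  *sad1 = fn_ptr->msdf(src, src_stride, pred1, pred_stride, pred0, mask,
                       mask_stride, /*invert_mask=*/1);
}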
@@ -162,80 +162,64 @@ using std::tr1::make_tuple;
// TODO(david.barker): Re-enable this once we have vectorized
// versions of the masked_compound_* functions
#if 0 && HAVE_SSSE3
-INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, MaskedSADTest,
-::testing::Values(
+INSTANTIATE_TEST_CASE_P(
+SSSE3_C_COMPARE, MaskedSADTest,
+::testing::Values(
#if CONFIG_EXT_PARTITION
-make_tuple(&aom_masked_compound_sad128x128_ssse3,
-&aom_masked_compound_sad128x128_c),
-make_tuple(&aom_masked_compound_sad128x64_ssse3,
-&aom_masked_compound_sad128x64_c),
-make_tuple(&aom_masked_compound_sad64x128_ssse3,
-&aom_masked_compound_sad64x128_c),
+make_tuple(&aom_masked_sad128x128_ssse3, &aom_masked_sad128x128_c),
+make_tuple(&aom_masked_sad128x64_ssse3, &aom_masked_sad128x64_c),
+make_tuple(&aom_masked_sad64x128_ssse3, &aom_masked_sad64x128_c),
#endif // CONFIG_EXT_PARTITION
-make_tuple(&aom_masked_compound_sad64x64_ssse3,
-&aom_masked_compound_sad64x64_c),
-make_tuple(&aom_masked_compound_sad64x32_ssse3,
-&aom_masked_compound_sad64x32_c),
-make_tuple(&aom_masked_compound_sad32x64_ssse3,
-&aom_masked_compound_sad32x64_c),
-make_tuple(&aom_masked_compound_sad32x32_ssse3,
-&aom_masked_compound_sad32x32_c),
-make_tuple(&aom_masked_compound_sad32x16_ssse3,
-&aom_masked_compound_sad32x16_c),
-make_tuple(&aom_masked_compound_sad16x32_ssse3,
-&aom_masked_compound_sad16x32_c),
-make_tuple(&aom_masked_compound_sad16x16_ssse3,
-&aom_masked_compound_sad16x16_c),
-make_tuple(&aom_masked_compound_sad16x8_ssse3,
-&aom_masked_compound_sad16x8_c),
-make_tuple(&aom_masked_compound_sad8x16_ssse3,
-&aom_masked_compound_sad8x16_c),
-make_tuple(&aom_masked_compound_sad8x8_ssse3,
-&aom_masked_compound_sad8x8_c),
-make_tuple(&aom_masked_compound_sad8x4_ssse3,
-&aom_masked_compound_sad8x4_c),
-make_tuple(&aom_masked_compound_sad4x8_ssse3,
-&aom_masked_compound_sad4x8_c),
-make_tuple(&aom_masked_compound_sad4x4_ssse3,
-&aom_masked_compound_sad4x4_c)));
+make_tuple(&aom_masked_sad64x64_ssse3, &aom_masked_sad64x64_c),
+make_tuple(&aom_masked_sad64x32_ssse3, &aom_masked_sad64x32_c),
+make_tuple(&aom_masked_sad32x64_ssse3, &aom_masked_sad32x64_c),
+make_tuple(&aom_masked_sad32x32_ssse3, &aom_masked_sad32x32_c),
+make_tuple(&aom_masked_sad32x16_ssse3, &aom_masked_sad32x16_c),
+make_tuple(&aom_masked_sad16x32_ssse3, &aom_masked_sad16x32_c),
+make_tuple(&aom_masked_sad16x16_ssse3, &aom_masked_sad16x16_c),
+make_tuple(&aom_masked_sad16x8_ssse3, &aom_masked_sad16x8_c),
+make_tuple(&aom_masked_sad8x16_ssse3, &aom_masked_sad8x16_c),
+make_tuple(&aom_masked_sad8x8_ssse3, &aom_masked_sad8x8_c),
+make_tuple(&aom_masked_sad8x4_ssse3, &aom_masked_sad8x4_c),
+make_tuple(&aom_masked_sad4x8_ssse3, &aom_masked_sad4x8_c),
+make_tuple(&aom_masked_sad4x4_ssse3, &aom_masked_sad4x4_c)));
#if CONFIG_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(
-SSSE3_C_COMPARE, HighbdMaskedSADTest,
-::testing::Values(
+INSTANTIATE_TEST_CASE_P(SSSE3_C_COMPARE, HighbdMaskedSADTest,
+::testing::Values(
#if CONFIG_EXT_PARTITION
-make_tuple(&aom_highbd_masked_compound_sad128x128_ssse3,
-&aom_highbd_masked_compound_sad128x128_c),
-make_tuple(&aom_highbd_masked_compound_sad128x64_ssse3,
-&aom_highbd_masked_compound_sad128x64_c),
-make_tuple(&aom_highbd_masked_compound_sad64x128_ssse3,
-&aom_highbd_masked_compound_sad64x128_c),
+make_tuple(&aom_highbd_masked_sad128x128_ssse3,
+&aom_highbd_masked_sad128x128_c),
+make_tuple(&aom_highbd_masked_sad128x64_ssse3,
+&aom_highbd_masked_sad128x64_c),
+make_tuple(&aom_highbd_masked_sad64x128_ssse3,
+&aom_highbd_masked_sad64x128_c),
#endif // CONFIG_EXT_PARTITION
-make_tuple(&aom_highbd_masked_compound_sad64x64_ssse3,
-&aom_highbd_masked_compound_sad64x64_c),
-make_tuple(&aom_highbd_masked_compound_sad64x32_ssse3,
-&aom_highbd_masked_compound_sad64x32_c),
-make_tuple(&aom_highbd_masked_compound_sad32x64_ssse3,
-&aom_highbd_masked_compound_sad32x64_c),
-make_tuple(&aom_highbd_masked_compound_sad32x32_ssse3,
-&aom_highbd_masked_compound_sad32x32_c),
-make_tuple(&aom_highbd_masked_compound_sad32x16_ssse3,
-&aom_highbd_masked_compound_sad32x16_c),
-make_tuple(&aom_highbd_masked_compound_sad16x32_ssse3,
-&aom_highbd_masked_compound_sad16x32_c),
-make_tuple(&aom_highbd_masked_compound_sad16x16_ssse3,
-&aom_highbd_masked_compound_sad16x16_c),
-make_tuple(&aom_highbd_masked_compound_sad16x8_ssse3,
-&aom_highbd_masked_compound_sad16x8_c),
-make_tuple(&aom_highbd_masked_compound_sad8x16_ssse3,
-&aom_highbd_masked_compound_sad8x16_c),
-make_tuple(&aom_highbd_masked_compound_sad8x8_ssse3,
-&aom_highbd_masked_compound_sad8x8_c),
-make_tuple(&aom_highbd_masked_compound_sad8x4_ssse3,
-&aom_highbd_masked_compound_sad8x4_c),
-make_tuple(&aom_highbd_masked_compound_sad4x8_ssse3,
-&aom_highbd_masked_compound_sad4x8_c),
-make_tuple(&aom_highbd_masked_compound_sad4x4_ssse3,
-&aom_highbd_masked_compound_sad4x4_c)));
+make_tuple(&aom_highbd_masked_sad64x64_ssse3,
+&aom_highbd_masked_sad64x64_c),
+make_tuple(&aom_highbd_masked_sad64x32_ssse3,
+&aom_highbd_masked_sad64x32_c),
+make_tuple(&aom_highbd_masked_sad32x64_ssse3,
+&aom_highbd_masked_sad32x64_c),
+make_tuple(&aom_highbd_masked_sad32x32_ssse3,
+&aom_highbd_masked_sad32x32_c),
+make_tuple(&aom_highbd_masked_sad32x16_ssse3,
+&aom_highbd_masked_sad32x16_c),
+make_tuple(&aom_highbd_masked_sad16x32_ssse3,
+&aom_highbd_masked_sad16x32_c),
+make_tuple(&aom_highbd_masked_sad16x16_ssse3,
+&aom_highbd_masked_sad16x16_c),
+make_tuple(&aom_highbd_masked_sad16x8_ssse3,
+&aom_highbd_masked_sad16x8_c),
+make_tuple(&aom_highbd_masked_sad8x16_ssse3,
+&aom_highbd_masked_sad8x16_c),
+make_tuple(&aom_highbd_masked_sad8x8_ssse3,
+&aom_highbd_masked_sad8x8_c),
+make_tuple(&aom_highbd_masked_sad8x4_ssse3,
+&aom_highbd_masked_sad8x4_c),
+make_tuple(&aom_highbd_masked_sad4x8_ssse3,
+&aom_highbd_masked_sad4x8_c),
+make_tuple(&aom_highbd_masked_sad4x4_ssse3,
+&aom_highbd_masked_sad4x4_c)));
#endif // CONFIG_HIGHBITDEPTH
#endif // 0 && HAVE_SSSE3
} // namespace