Commit c155e018 authored by David Barker, committed by Debargha Mukherjee

ext-inter: Use joint_motion_search for masked compounds

Add functions which take both components of a masked compound and
compute the resulting SAD/SSE. Extend joint_motion_search to understand
masked compounds, and use it to evaluate NEW_NEWMV modes.

Change-Id: I782199a20d119a6c61c6567df157508125ac7ce7
parent 861d7071
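
For orientation before the diff: the new `aom_masked_compound_sad*` helpers score a *compound* prediction, i.e. they blend both predictor components with a 64-weight mask (via `AOM_BLEND_A64`) and only then take the SAD against the source; `invert_mask` selects which component receives the mask weights. A minimal standalone sketch of that idea, with illustrative names and without the per-size generation or rounding details used in the tree:

```c
#include <stdint.h>
#include <stdlib.h>

/* Round((m*a + (64-m)*b) / 64), the AOM_BLEND_A64 rule from aom_dsp/blend.h. */
static uint8_t blend_a64(int m, uint8_t a, uint8_t b) {
  return (uint8_t)((m * a + (64 - m) * b + 32) >> 6);
}

/* Sketch: SAD between the source and the masked blend of two predictors. */
static unsigned int masked_compound_sad_sketch(
    const uint8_t *src, int src_stride, const uint8_t *p0, int p0_stride,
    const uint8_t *p1, int p1_stride, const uint8_t *mask, int mask_stride,
    int invert_mask, int w, int h) {
  unsigned int sad = 0;
  for (int y = 0; y < h; y++) {
    for (int x = 0; x < w; x++) {
      /* invert_mask swaps which predictor the mask weights apply to. */
      const uint8_t pred = invert_mask ? blend_a64(mask[x], p1[x], p0[x])
                                       : blend_a64(mask[x], p0[x], p1[x]);
      sad += abs(pred - src[x]);
    }
    src += src_stride;
    p0 += p0_stride;
    p1 += p1_stride;
    mask += mask_stride;
  }
  return sad;
}
```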
@@ -740,6 +740,7 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
specialize "aom_masked_sad${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_masked_compound_sad${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
@@ -747,6 +748,8 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_highbd_masked_sad${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
specialize "aom_highbd_masked_sad${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_highbd_masked_compound_sad${w}x${h}", "const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, int msk_stride, int invert_mask";
}
}
}
@@ -1049,6 +1052,9 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
add_proto qw/unsigned int/, "aom_masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
specialize "aom_masked_variance${w}x${h}", qw/ssse3/;
specialize "aom_masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_masked_compound_variance${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *m, int m_stride, int invert_mask, unsigned int *sse";
add_proto qw/unsigned int/, "aom_masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
@@ -1059,6 +1065,9 @@ if (aom_config("CONFIG_EXT_INTER") eq "yes") {
add_proto qw/unsigned int/, "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
specialize "aom_highbd${bd}masked_variance${w}x${h}", qw/ssse3/;
specialize "aom_highbd${bd}masked_sub_pixel_variance${w}x${h}", qw/ssse3/;
add_proto qw/unsigned int/, "aom_highbd${bd}masked_compound_variance${w}x${h}", "const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *m, int m_stride, int invert_mask, unsigned int *sse";
add_proto qw/unsigned int/, "aom_highbd${bd}masked_compound_sub_pixel_variance${w}x${h}", "const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *ref, int ref_stride, const uint8_t *second_pred, const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse";
}
}
}
@@ -1501,6 +1510,15 @@ if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
} # CONFIG_HIGHBITDEPTH
if (aom_config("CONFIG_EXT_INTER") eq "yes") {
add_proto qw/void aom_comp_mask_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
add_proto qw/void aom_comp_mask_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_comp_mask_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
add_proto qw/void aom_highbd_comp_mask_upsampled_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride, const uint8_t *mask, int mask_stride, int invert_mask";
}
}
} # CONFIG_ENCODERS
1;
@@ -16,6 +16,7 @@
#include "aom/aom_integer.h"
#include "aom_ports/mem.h"
#include "aom_dsp/blend.h"
/* Sum the difference between every corresponding element of the buffers. */
static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
@@ -329,12 +330,48 @@ highbd_sadMxNx4D(4, 4)
return sad;
}
static INLINE unsigned int masked_compound_sad(const uint8_t *src,
int src_stride, const uint8_t *a,
int a_stride, const uint8_t *b,
int b_stride, const uint8_t *m,
int m_stride, int width,
int height) {
int y, x;
unsigned int sad = 0;
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
const uint8_t pred = AOM_BLEND_A64(m[x], a[x], b[x]);
sad += abs(pred - src[x]);
}
src += src_stride;
a += a_stride;
b += b_stride;
m += m_stride;
}
sad = (sad + 31) >> 6;
return sad;
}
#define MASKSADMxN(m, n) \
unsigned int aom_masked_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return masked_sad(src, src_stride, ref, ref_stride, msk, msk_stride, m, \
n); \
} \
unsigned int aom_masked_compound_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred, const uint8_t *msk, int msk_stride, \
int invert_mask) { \
if (!invert_mask) \
return masked_compound_sad(src, src_stride, ref, ref_stride, \
second_pred, m, msk, msk_stride, m, n); \
else \
return masked_compound_sad(src, src_stride, second_pred, m, ref, \
ref_stride, msk, msk_stride, m, n); \
}
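
A hedged usage sketch of one generated function (buffer contents and the 8x8 size are hypothetical; note that the macro passes the block width as the stride of `second_pred`, so the second predictor must sit in a contiguous width-stride buffer):

```c
uint8_t src[8 * 8], ref[8 * 8], second_pred[8 * 8], mask[8 * 8];
/* ... fill the buffers; second_pred uses stride 8 (= block width), and the
 * mask holds per-pixel weights in [0, 64]. */
unsigned int sad_fwd = aom_masked_compound_sad8x8_c(
    src, 8, ref, 8, second_pred, mask, 8, /*invert_mask=*/0);
unsigned int sad_inv = aom_masked_compound_sad8x8_c(
    src, 8, ref, 8, second_pred, mask, 8, /*invert_mask=*/1);
```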
/* clang-format off */
@@ -381,12 +418,51 @@ MASKSADMxN(4, 4)
return sad;
}
static INLINE unsigned int highbd_masked_compound_sad(
const uint8_t *src8, int src_stride, const uint8_t *a8, int a_stride,
const uint8_t *b8, int b_stride, const uint8_t *m, int m_stride, int width,
int height) {
int y, x;
unsigned int sad = 0;
const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
const uint16_t pred = AOM_BLEND_A64(m[x], a[x], b[x]);
sad += abs(pred - src[x]);
}
src += src_stride;
a += a_stride;
b += b_stride;
m += m_stride;
}
sad = (sad + 31) >> 6;
return sad;
}
#define HIGHBD_MASKSADMXN(m, n) \
unsigned int aom_highbd_masked_sad##m##x##n##_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *msk, int msk_stride) { \
return highbd_masked_sad(src, src_stride, ref, ref_stride, msk, \
msk_stride, m, n); \
} \
unsigned int aom_highbd_masked_compound_sad##m##x##n##_c( \
const uint8_t *src8, int src_stride, const uint8_t *ref8, \
int ref_stride, const uint8_t *second_pred8, const uint8_t *msk, \
int msk_stride, int invert_mask) { \
if (!invert_mask) \
return highbd_masked_compound_sad(src8, src_stride, ref8, ref_stride, \
second_pred8, m, msk, msk_stride, m, \
n); \
else \
return highbd_masked_compound_sad(src8, src_stride, second_pred8, m, \
ref8, ref_stride, msk, msk_stride, m, \
n); \
}
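
The high-bitdepth variants follow the usual libaom convention: 16-bit pixel buffers travel through the API as `uint8_t *` handles and are unpacked with `CONVERT_TO_SHORTPTR` internally. An illustrative call (sizes and names assumed):

```c
uint16_t src16[8 * 8], ref16[8 * 8], pred16[8 * 8];
uint8_t mask[8 * 8];
/* ... fill the 16-bit buffers with high-bitdepth samples ... */
unsigned int sad = aom_highbd_masked_compound_sad8x8_c(
    CONVERT_TO_BYTEPTR(src16), 8, CONVERT_TO_BYTEPTR(ref16), 8,
    CONVERT_TO_BYTEPTR(pred16), mask, 8, /*invert_mask=*/0);
```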
#if CONFIG_EXT_PARTITION
......
This diff is collapsed.
@@ -66,6 +66,19 @@ typedef unsigned int (*aom_masked_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *msk, int msk_stride,
unsigned int *sse);
typedef unsigned int (*aom_masked_compound_sad_fn_t)(
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
const uint8_t *second_pred, const uint8_t *msk, int msk_stride,
int invert_mask);
typedef unsigned int (*aom_masked_compound_variance_fn_t)(
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride,
const uint8_t *second_pred, const uint8_t *m, int m_stride, int invert_mask,
unsigned int *sse);
typedef unsigned int (*aom_masked_compound_subpixvariance_fn_t)(
const uint8_t *src, int src_stride, int xoffset, int yoffset,
const uint8_t *ref, int ref_stride, const uint8_t *second_pred,
const uint8_t *msk, int msk_stride, int invert_mask, unsigned int *sse);
#endif // CONFIG_AV1 && CONFIG_EXT_INTER
#if CONFIG_AV1 && CONFIG_MOTION_VAR
@@ -96,6 +109,10 @@ typedef struct aom_variance_vtable {
aom_masked_sad_fn_t msdf;
aom_masked_variance_fn_t mvf;
aom_masked_subpixvariance_fn_t msvf;
aom_masked_compound_sad_fn_t mcsdf;
aom_masked_compound_variance_fn_t mcvf;
aom_masked_compound_subpixvariance_fn_t mcsvf;
#endif // CONFIG_EXT_INTER
#if CONFIG_MOTION_VAR
aom_obmc_sad_fn_t osdf;
......
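
For the new slots to take effect, the encoder's per-block-size `fn_ptr` tables presumably get the compound hooks wired in next to the existing `msdf`/`mvf`/`msvf` entries. A hypothetical wiring sketch (the exact initialization site is not part of this diff):

```c
/* Hypothetical: populate one block size's vtable with the new functions
 * declared through aom_dsp_rtcd above. */
aom_variance_fn_ptr_t *fn = &cpi->fn_ptr[BLOCK_16X16];
fn->mcsdf = aom_masked_compound_sad16x16;
fn->mcvf = aom_masked_compound_variance16x16;
fn->mcsvf = aom_masked_compound_sub_pixel_variance16x16;
```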
This diff is collapsed.
@@ -52,11 +52,14 @@ static unsigned int do_16x16_motion_iteration(AV1_COMP *cpi, const MV *ref_mv,
{
int distortion;
unsigned int sse;
cpi->find_fractional_mv_step(x, ref_mv, cpi->common.allow_high_precision_mv,
x->errorperbit, &v_fn_ptr, 0,
mv_sf->subpel_iters_per_step,
cond_cost_list(cpi, cost_list), NULL, NULL,
&distortion, &sse, NULL, 0, 0, 0);
cpi->find_fractional_mv_step(
x, ref_mv, cpi->common.allow_high_precision_mv, x->errorperbit,
&v_fn_ptr, 0, mv_sf->subpel_iters_per_step,
cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
0, 0, 0);
}
#if CONFIG_EXT_INTER
......
This diff is collapsed.
@@ -58,6 +58,13 @@ int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const aom_variance_fn_ptr_t *vfp, int use_mvcost);
#if CONFIG_EXT_INTER
int av1_get_mvpred_mask_var(const MACROBLOCK *x, const MV *best_mv,
const MV *center_mv, const uint8_t *second_pred,
const uint8_t *mask, int mask_stride,
int invert_mask, const aom_variance_fn_ptr_t *vfp,
int use_mvcost);
#endif
struct AV1_COMP;
struct SPEED_FEATURES;
@@ -91,8 +98,11 @@ typedef int(fractional_mv_step_fp)(
const aom_variance_fn_ptr_t *vfp,
int forced_stop, // 0 - full, 1 - qtr only, 2 - half only
int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
int h, int use_upsampled_ref);
int *distortion, unsigned int *sse1, const uint8_t *second_pred,
#if CONFIG_EXT_INTER
const uint8_t *mask, int mask_stride, int invert_mask,
#endif
int w, int h, int use_upsampled_ref);
extern fractional_mv_step_fp av1_find_best_sub_pixel_tree;
extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned;
@@ -113,6 +123,10 @@ typedef int (*av1_diamond_search_fn_t)(
int av1_refining_search_8p_c(MACROBLOCK *x, int error_per_bit, int search_range,
const aom_variance_fn_ptr_t *fn_ptr,
#if CONFIG_EXT_INTER
const uint8_t *mask, int mask_stride,
int invert_mask,
#endif
const MV *center_mv, const uint8_t *second_pred);
struct AV1_COMP;
......
@@ -5452,7 +5452,8 @@ static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int_mv *frame_mv, int mi_row,
int mi_col,
#if CONFIG_EXT_INTER
int_mv *ref_mv_sub8x8[2],
int_mv *ref_mv_sub8x8[2], const uint8_t *mask,
int mask_stride,
#endif // CONFIG_EXT_INTER
int *rate_mv, const int block) {
const AV1_COMMON *const cm = &cpi->common;
@@ -5618,10 +5619,21 @@ static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
// Small-range full-pixel motion search.
bestsme =
av1_refining_search_8p_c(x, sadpb, search_range, &cpi->fn_ptr[bsize],
#if CONFIG_EXT_INTER
mask, mask_stride, id,
#endif
&ref_mv[id].as_mv, second_pred);
if (bestsme < INT_MAX)
bestsme = av1_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
if (bestsme < INT_MAX) {
#if CONFIG_EXT_INTER
if (mask)
bestsme = av1_get_mvpred_mask_var(x, best_mv, &ref_mv[id].as_mv,
second_pred, mask, mask_stride, id,
&cpi->fn_ptr[bsize], 1);
else
#endif
bestsme = av1_get_mvpred_av_var(x, best_mv, &ref_mv[id].as_mv,
second_pred, &cpi->fn_ptr[bsize], 1);
}
x->mv_limits = tmp_mv_limits;
@@ -5654,7 +5666,11 @@ static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
x, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize], 0,
cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
&dis, &sse, second_pred, pw, ph, 1);
&dis, &sse, second_pred,
#if CONFIG_EXT_INTER
mask, mask_stride, id,
#endif
pw, ph, 1);
// Restore the reference frames.
pd->pre[0] = backup_pred;
@@ -5664,7 +5680,11 @@ static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
x, &ref_mv[id].as_mv, cpi->common.allow_high_precision_mv,
x->errorperbit, &cpi->fn_ptr[bsize], 0,
cpi->sf.mv.subpel_iters_per_step, NULL, x->nmvjointcost, x->mvcost,
&dis, &sse, second_pred, pw, ph, 0);
&dis, &sse, second_pred,
#if CONFIG_EXT_INTER
mask, mask_stride, id,
#endif
pw, ph, 0);
}
}
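
One detail worth calling out in the calls above: the component index `id` is passed straight through as the `invert_mask` argument. When the second component (`id == 1`) is being refined, the roles of the two predictors swap relative to the mask, so applying the mask inverted keeps the search metric consistent with the final compound prediction.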
@@ -6060,8 +6080,11 @@ static int64_t rd_pick_inter_best_sub8x8_mode(
cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step,
cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
&distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL, pw, ph,
1);
&distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
pw, ph, 1);
if (try_second) {
int this_var;
@@ -6088,7 +6111,11 @@ static int64_t rd_pick_inter_best_sub8x8_mode(
cpi->sf.mv.subpel_iters_per_step,
cond_cost_list(cpi, cost_list), x->nmvjointcost,
x->mvcost, &distortion, &x->pred_sse[mbmi->ref_frame[0]],
NULL, pw, ph, 1);
NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
pw, ph, 1);
if (this_var < best_mv_var) best_mv = x->best_mv.as_mv;
x->best_mv.as_mv = best_mv;
}
@@ -6103,7 +6130,11 @@ static int64_t rd_pick_inter_best_sub8x8_mode(
cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step,
cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
&distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL, 0, 0, 0);
&distortion, &x->pred_sse[mbmi->ref_frame[0]], NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
0, 0, 0);
}
// save motion search result for use in compound prediction
@@ -6165,7 +6196,7 @@ static int64_t rd_pick_inter_best_sub8x8_mode(
joint_motion_search(cpi, x, bsize, frame_mv[this_mode], mi_row,
mi_col,
#if CONFIG_EXT_INTER
bsi->ref_mv,
bsi->ref_mv, NULL, 0,
#endif // CONFIG_EXT_INTER
&rate_mv, index);
#if CONFIG_EXT_INTER
@@ -6958,8 +6989,11 @@ static void single_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, pw, ph,
1);
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
pw, ph, 1);
if (try_second) {
const int minc =
@@ -6983,7 +7017,11 @@ static void single_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step,
cond_cost_list(cpi, cost_list), x->nmvjointcost, x->mvcost,
&dis, &x->pred_sse[ref], NULL, pw, ph, 1);
&dis, &x->pred_sse[ref], NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
pw, ph, 1);
if (this_var < best_mv_var) best_mv = x->best_mv.as_mv;
x->best_mv.as_mv = best_mv;
}
@@ -6996,8 +7034,11 @@ static void single_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
x, &ref_mv, cm->allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[bsize], cpi->sf.mv.subpel_force_stop,
cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0,
0);
x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
0, 0, 0);
}
#if CONFIG_MOTION_VAR
break;
@@ -7161,7 +7202,7 @@ static void do_masked_motion_search(const AV1_COMP *const cpi, MACROBLOCK *x,
}
static void do_masked_motion_search_indexed(
const AV1_COMP *const cpi, MACROBLOCK *x,
const AV1_COMP *const cpi, MACROBLOCK *x, const int_mv *const cur_mv,
const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE bsize,
int mi_row, int mi_col, int_mv *tmp_mv, int *rate_mv, int which) {
// NOTE: which values: 0 - 0 only, 1 - 1 only, 2 - both
@@ -7173,11 +7214,21 @@ static void do_masked_motion_search_indexed(
mask = av1_get_compound_type_mask(comp_data, sb_type);
if (which == 0 || which == 2)
if (which == 2) {
int_mv frame_mv[TOTAL_REFS_PER_FRAME];
MV_REFERENCE_FRAME rf[2] = { mbmi->ref_frame[0], mbmi->ref_frame[1] };
assert(bsize >= BLOCK_8X8 || CONFIG_CB4X4);
frame_mv[rf[0]].as_int = cur_mv[0].as_int;
frame_mv[rf[1]].as_int = cur_mv[1].as_int;
joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, mask,
mask_stride, rate_mv, 0);
tmp_mv[0].as_int = frame_mv[rf[0]].as_int;
tmp_mv[1].as_int = frame_mv[rf[1]].as_int;
} else if (which == 0) {
do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
&tmp_mv[0], &rate_mv[0], 0);
if (which == 1 || which == 2) {
&tmp_mv[0], rate_mv, 0);
} else if (which == 1) {
// get the negative mask
#if CONFIG_COMPOUND_SEGMENT
uint8_t inv_mask_buf[2 * MAX_SB_SQUARE];
@@ -7188,7 +7239,7 @@ static void do_masked_motion_search_indexed(
mask = av1_get_compound_type_mask_inverse(comp_data, sb_type);
#endif // CONFIG_COMPOUND_SEGMENT
do_masked_motion_search(cpi, x, mask, mask_stride, bsize, mi_row, mi_col,
&tmp_mv[1], &rate_mv[1], 1);
&tmp_mv[1], rate_mv, 1);
}
}
#endif // CONFIG_COMPOUND_SEGMENT || CONFIG_WEDGE
@@ -7665,15 +7716,13 @@ static int64_t pick_interinter_mask(const AV1_COMP *const cpi, MACROBLOCK *x,
}
}
static int interinter_compound_motion_search(const AV1_COMP *const cpi,
MACROBLOCK *x,
const BLOCK_SIZE bsize,
const int this_mode, int mi_row,
int mi_col) {
static int interinter_compound_motion_search(
const AV1_COMP *const cpi, MACROBLOCK *x, const int_mv *const cur_mv,
const BLOCK_SIZE bsize, const int this_mode, int mi_row, int mi_col) {
MACROBLOCKD *const xd = &x->e_mbd;
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int_mv tmp_mv[2];
int rate_mvs[2], tmp_rate_mv = 0;
int tmp_rate_mv = 0;
const INTERINTER_COMPOUND_DATA compound_data = {
#if CONFIG_WEDGE
mbmi->wedge_index,
@@ -7686,20 +7735,17 @@ static int interinter_compound_motion_search(const AV1_COMP *const cpi,
mbmi->interinter_compound_type
};
if (this_mode == NEW_NEWMV) {
do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
mi_col, tmp_mv, rate_mvs, 2);
tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
do_masked_motion_search_indexed(cpi, x, cur_mv, &compound_data, bsize,
mi_row, mi_col, tmp_mv, &tmp_rate_mv, 2);
mbmi->mv[0].as_int = tmp_mv[0].as_int;
mbmi->mv[1].as_int = tmp_mv[1].as_int;
} else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
mi_col, tmp_mv, rate_mvs, 0);
tmp_rate_mv = rate_mvs[0];
do_masked_motion_search_indexed(cpi, x, cur_mv, &compound_data, bsize,
mi_row, mi_col, tmp_mv, &tmp_rate_mv, 0);
mbmi->mv[0].as_int = tmp_mv[0].as_int;
} else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
do_masked_motion_search_indexed(cpi, x, &compound_data, bsize, mi_row,
mi_col, tmp_mv, rate_mvs, 1);
tmp_rate_mv = rate_mvs[1];
do_masked_motion_search_indexed(cpi, x, cur_mv, &compound_data, bsize,
mi_row, mi_col, tmp_mv, &tmp_rate_mv, 1);
mbmi->mv[1].as_int = tmp_mv[1].as_int;
}
return tmp_rate_mv;
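
The rate accounting is the visible payoff here: NEW_NEWMV previously ran two independent one-sided masked searches and summed their rates, whereas now a single `which == 2` call routes into `joint_motion_search` and returns one combined MV rate. Condensed from the diff above:

```c
/* Before: two independent searches, rates summed.
 *   do_masked_motion_search_indexed(cpi, x, &compound_data, bsize,
 *                                   mi_row, mi_col, tmp_mv, rate_mvs, 2);
 *   tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
 * After: one joint search over both components, one combined rate. */
do_masked_motion_search_indexed(cpi, x, cur_mv, &compound_data, bsize, mi_row,
                                mi_col, tmp_mv, &tmp_rate_mv, 2);
```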
@@ -7726,8 +7772,8 @@ static int64_t build_and_cost_compound_type(
if (have_newmv_in_inter_mode(this_mode) &&
use_masked_motion_search(compound_type)) {
*out_rate_mv = interinter_compound_motion_search(cpi, x, bsize, this_mode,
mi_row, mi_col);
*out_rate_mv = interinter_compound_motion_search(cpi, x, cur_mv, bsize,
this_mode, mi_row, mi_col);
av1_build_inter_predictors_sby(cm, xd, mi_row, mi_col, ctx, bsize);
model_rd_for_sb(cpi, bsize, x, xd, 0, 0, &rate_sum, &dist_sum,
&tmp_skip_txfm_sb, &tmp_skip_sse_sb);
@@ -7823,8 +7869,8 @@ static int64_t handle_newmv(const AV1_COMP *const cpi, MACROBLOCK *const x,
frame_mv[refs[1]].as_int = single_newmv[refs[1]].as_int;
if (cpi->sf.comp_inter_joint_search_thresh <= bsize) {
joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, NULL,
rate_mv, 0);
joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col, NULL, NULL,
0, rate_mv, 0);
} else {
*rate_mv = 0;
for (i = 0; i < 2; ++i) {
......
@@ -297,8 +297,11 @@ static int temporal_filter_find_matching_mb_c(AV1_COMP *cpi,
bestsme = cpi->find_fractional_mv_step(
x, &best_ref_mv1, cpi->common.allow_high_precision_mv, x->errorperbit,
&cpi->fn_ptr[BLOCK_16X16], 0, mv_sf->subpel_iters_per_step,
cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL, 0, 0,
0);
cond_cost_list(cpi, cost_list), NULL, NULL, &distortion, &sse, NULL,
#if CONFIG_EXT_INTER
NULL, 0, 0,
#endif
0, 0, 0);
x->e_mbd.mi[0]->bmi[0].as_mv[0] = x->best_mv;
......