Commit f78632e0 authored by Cheng Chen's avatar Cheng Chen Committed by Jingning Han

JNT_COMP: Refactor code

The refactoring serves two purposes:
1. Separate code paths for jnt_comp and original compound average
computation. It provides function interface for jnt_comp while leaving
original compound average computation unchanged. In near future, SIMD
functions can be added for jnt_comp using the interface.

2. The previous implementation used a hack on second_pred, which could
cause a segmentation fault when the test clip is small, as reported in
Issue 944. This refactoring removes the hack and makes it possible to
address the seg fault problem in the future.

Change-Id: Idd2cb99f6c77dae03d32ccfa1f9cbed1d7eed067
parent 7fc6b2ac
......@@ -7,6 +7,7 @@ print <<EOF
#include "aom/aom_integer.h"
#include "aom_dsp/aom_dsp_common.h"
#include "av1/common/enums.h"
#include "av1/common/blockd.h"
EOF
}
......@@ -829,6 +830,9 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
($w, $h) = @$_;
add_proto qw/unsigned int/, "aom_sad${w}x${h}", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride";
add_proto qw/unsigned int/, "aom_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
if (aom_config("CONFIG_JNT_COMP") eq "yes") {
add_proto qw/unsigned int/, "aom_jnt_sad${w}x${h}_avg", "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred, const JNT_COMP_PARAMS *jcp_param";
}
}
specialize qw/aom_sad128x128 avx2 sse2/;
......@@ -1100,9 +1104,14 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
#
add_proto qw/void aom_upsampled_pred/, "uint8_t *comp_pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref, int ref_stride";
specialize qw/aom_upsampled_pred sse2/;
add_proto qw/void aom_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref, int ref_stride";
specialize qw/aom_comp_avg_upsampled_pred sse2/;
if (aom_config("CONFIG_JNT_COMP") eq "yes") {
add_proto qw/void aom_jnt_comp_avg_upsampled_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref, int ref_stride, const JNT_COMP_PARAMS *jcp_param";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/void aom_highbd_upsampled_pred/, "uint16_t *comp_pred, int width, int height, int subsample_x_q3, int subsample_y_q3, const uint8_t *ref8, int ref_stride, int bd";
specialize qw/aom_highbd_upsampled_pred sse2/;
......@@ -1133,6 +1142,9 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/unsigned int/, "aom_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
add_proto qw/uint32_t/, "aom_sub_pixel_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse";
add_proto qw/uint32_t/, "aom_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
if (aom_config("CONFIG_JNT_COMP") eq "yes") {
add_proto qw/uint32_t/, "aom_jnt_sub_pixel_avg_variance${w}x${h}", "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred, const JNT_COMP_PARAMS *jcp_param";
}
}
specialize qw/aom_variance64x64 sse2 avx2 neon msa/;
......@@ -1309,7 +1321,6 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
add_proto qw/uint32_t aom_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
specialize qw/aom_sub_pixel_avg_variance4x4 msa sse2 ssse3/;
#
# Specialty Subpixel
#
......@@ -1326,6 +1337,9 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
# Comp Avg
#
add_proto qw/void aom_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
if (aom_config("CONFIG_JNT_COMP") eq "yes") {
add_proto qw/void aom_jnt_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride, const JNT_COMP_PARAMS *jcp_param";
}
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto qw/unsigned int aom_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
specialize qw/aom_highbd_12_variance64x64 sse2/;
......
......@@ -33,6 +33,28 @@ static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
return sad;
}
#if CONFIG_JNT_COMP
/* sadMxN(m, n) — CONFIG_JNT_COMP variant.  Expands to the C reference
 * implementations of three SAD kernels for an m x n block:
 *   aom_sadMxN_c         plain SAD of src vs ref;
 *   aom_sadMxN_avg_c     SAD of src vs the (ref + second_pred) average,
 *                        built into an on-stack comp_pred of m*n bytes;
 *   aom_jnt_sadMxN_avg_c SAD of src vs the jointly-weighted blend of ref
 *                        and second_pred, weights supplied via jcp_param.
 * In the trailing sad() calls comp_pred is dense, so its stride is m
 * (4th argument), with block width m and height n. */
#define sadMxN(m, n) \
unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride) { \
return sad(src, src_stride, ref, ref_stride, m, n); \
} \
unsigned int aom_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred) { \
uint8_t comp_pred[m * n]; \
aom_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride); \
return sad(src, src_stride, comp_pred, m, m, n); \
} \
unsigned int aom_jnt_sad##m##x##n##_avg_c( \
const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \
const uint8_t *second_pred, const JNT_COMP_PARAMS *jcp_param) { \
uint8_t comp_pred[m * n]; \
aom_jnt_comp_avg_pred(comp_pred, second_pred, m, n, ref, ref_stride, \
jcp_param); \
return sad(src, src_stride, comp_pred, m, m, n); \
}
#else // CONFIG_JNT_COMP
#define sadMxN(m, n) \
unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
const uint8_t *ref, int ref_stride) { \
......@@ -45,6 +67,7 @@ static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b,
aom_comp_avg_pred_c(comp_pred, second_pred, m, n, ref, ref_stride); \
return sad(src, src_stride, comp_pred, m, m, n); \
}
#endif // CONFIG_JNT_COMP
// depending on call sites, pass **ref_array to avoid & in subsequent call and
// de-dup with 4D below.
......
......@@ -180,6 +180,43 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
return aom_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
}
#if CONFIG_JNT_COMP
/* SUBPIX_AVG_VAR(W, H) — CONFIG_JNT_COMP variant.  Expands to the C
 * reference sub-pixel averaging variance kernels for a W x H block:
 *   aom_sub_pixel_avg_varianceWxH_c      bilinear-filters `a` by
 *       (xoffset, yoffset) in two passes, averages the filtered block
 *       with second_pred, and returns its variance against `b`;
 *   aom_jnt_sub_pixel_avg_varianceWxH_c  same pipeline, but the blend
 *       with second_pred uses the jcp_param weights.
 * fdata3 holds the (H+1) x W first-pass output, temp2 the second-pass
 * output, and temp3 (16-byte aligned) the final blended block. */
#define SUBPIX_AVG_VAR(W, H) \
uint32_t aom_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, int xoffset, int yoffset, \
const uint8_t *b, int b_stride, uint32_t *sse, \
const uint8_t *second_pred) { \
uint16_t fdata3[(H + 1) * W]; \
uint8_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
\
var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
bilinear_filters_2t[xoffset]); \
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
aom_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \
\
return aom_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
} \
uint32_t aom_jnt_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, int xoffset, int yoffset, \
const uint8_t *b, int b_stride, uint32_t *sse, \
const uint8_t *second_pred, const JNT_COMP_PARAMS *jcp_param) { \
uint16_t fdata3[(H + 1) * W]; \
uint8_t temp2[H * W]; \
DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
\
var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
bilinear_filters_2t[xoffset]); \
var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
bilinear_filters_2t[yoffset]); \
\
aom_jnt_comp_avg_pred(temp3, second_pred, W, H, temp2, W, jcp_param); \
\
return aom_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
}
#else // CONFIG_JNT_COMP
#define SUBPIX_AVG_VAR(W, H) \
uint32_t aom_sub_pixel_avg_variance##W##x##H##_c( \
const uint8_t *a, int a_stride, int xoffset, int yoffset, \
......@@ -198,6 +235,7 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
\
return aom_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \
}
#endif // CONFIG_JNT_COMP
/* Identical to the variance call except it takes an additional parameter, sum,
* and returns that value using pass-by-reference instead of returning
......@@ -275,23 +313,11 @@ MSE(8, 8)
void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
int height, const uint8_t *ref, int ref_stride) {
int i, j;
#if CONFIG_JNT_COMP
int bck_offset = pred[4096];
int fwd_offset = pred[4097];
double sum = bck_offset + fwd_offset;
#endif // CONFIG_JNT_COMP
for (i = 0; i < height; ++i) {
for (j = 0; j < width; ++j) {
#if CONFIG_JNT_COMP
int tmp = pred[j] * fwd_offset + ref[j] * bck_offset;
tmp = (int)(0.5 + tmp / sum);
if (tmp > 255) tmp = 255;
comp_pred[j] = (uint8_t)tmp;
#else
const int tmp = pred[j] + ref[j];
comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);
#endif // CONFIG_JNT_COMP
}
comp_pred += width;
pred += width;
......@@ -352,35 +378,65 @@ void aom_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
int subpel_y_q3, const uint8_t *ref,
int ref_stride) {
int i, j;
aom_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
ref_stride);
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
comp_pred[j] = ROUND_POWER_OF_TWO(comp_pred[j] + pred[j], 1);
}
comp_pred += width;
pred += width;
}
}
#if CONFIG_JNT_COMP
int bck_offset = pred[4096];
int fwd_offset = pred[4097];
// C reference implementation of the jointly-weighted compound average:
// each output pixel is round(pred * bck_offset + ref * fwd_offset, divided
// by fwd_offset + bck_offset), clamped from above at 255. pred and the
// output comp_pred are dense (stride == width); ref uses ref_stride.
void aom_jnt_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
                             int height, const uint8_t *ref, int ref_stride,
                             const JNT_COMP_PARAMS *jcp_param) {
  const int fwd_offset = jcp_param->fwd_offset;
  const int bck_offset = jcp_param->bck_offset;
  // Denominator kept in double so the "+ 0.5" below rounds the
  // real-valued quotient half-up, exactly as the original did.
  const double sum = bck_offset + fwd_offset;

  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      int tmp = pred[col] * bck_offset + ref[col] * fwd_offset;
      tmp = (int)(0.5 + tmp / sum);
      if (tmp > 255) tmp = 255;
      comp_pred[col] = (uint8_t)tmp;
    }
    comp_pred += width;
    pred += width;
    ref += ref_stride;
  }
}
// Jointly-weighted variant of aom_comp_avg_upsampled_pred_c: upsample ref
// into comp_pred, then blend each upsampled pixel with pred using the
// fwd/bck weights from jcp_param (round-half-up via the double `sum`,
// clamped at 255).
void aom_jnt_comp_avg_upsampled_pred_c(uint8_t *comp_pred, const uint8_t *pred,
int width, int height, int subpel_x_q3,
int subpel_y_q3, const uint8_t *ref,
int ref_stride,
const JNT_COMP_PARAMS *jcp_param) {
int i, j;
const int fwd_offset = jcp_param->fwd_offset;
const int bck_offset = jcp_param->bck_offset;
double sum = bck_offset + fwd_offset;
/* NOTE(review): the preprocessor lines and the duplicated `int tmp`
 * declaration below are interleaved old/new lines from this diff's
 * rendering; in the committed file only the CONFIG_JNT_COMP branch of
 * each pair remains — verify against the actual checked-in source. */
#endif  // CONFIG_JNT_COMP
#if CONFIG_JNT_COMP
aom_upsampled_pred_c(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
ref_stride);
#else
aom_upsampled_pred(comp_pred, width, height, subpel_x_q3, subpel_y_q3, ref,
ref_stride);
#endif  // CONFIG_JNT_COMP
for (i = 0; i < height; i++) {
for (j = 0; j < width; j++) {
#if CONFIG_JNT_COMP
int tmp = pred[j] * fwd_offset + comp_pred[j] * bck_offset;
int tmp = pred[j] * bck_offset + comp_pred[j] * fwd_offset;
tmp = (int)(0.5 + tmp / sum);
if (tmp > 255) tmp = 255;
comp_pred[j] = (uint8_t)tmp;
#else
comp_pred[j] = ROUND_POWER_OF_TWO(comp_pred[j] + pred[j], 1);
#endif  // CONFIG_JNT_COMP
}
comp_pred += width;
pred += width;
}
}
#endif // CONFIG_JNT_COMP
#if CONFIG_HIGHBITDEPTH
static void highbd_variance64(const uint8_t *a8, int a_stride,
......
......@@ -54,6 +54,18 @@ typedef unsigned int (*aom_subp_avg_variance_fn_t)(
const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b,
int b_stride, unsigned int *sse, const uint8_t *second_pred);
#if CONFIG_JNT_COMP
// Function-pointer types for the jnt_comp (jointly-weighted compound)
// encoder kernels: they mirror the plain SAD-avg and sub-pixel-avg-variance
// signatures above, with a trailing JNT_COMP_PARAMS argument replacing the
// old practice of stashing the weights inside second_pred.
typedef unsigned int (*aom_jnt_sad_avg_fn_t)(const uint8_t *a, int a_stride,
const uint8_t *b, int b_stride,
const uint8_t *second_pred,
const JNT_COMP_PARAMS *jcp_param);
// Sub-pixel averaging variance with jnt_comp weights; returns the variance
// and writes the SSE through *sse.
typedef unsigned int (*aom_jnt_subp_avg_variance_fn_t)(
const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b,
int b_stride, unsigned int *sse, const uint8_t *second_pred,
const JNT_COMP_PARAMS *jcp_param);
#endif // CONFIG_JNT_COMP
#if CONFIG_AV1
typedef unsigned int (*aom_masked_sad_fn_t)(const uint8_t *src, int src_stride,
const uint8_t *ref, int ref_stride,
......@@ -95,6 +107,10 @@ typedef struct aom_variance_vtable {
aom_obmc_sad_fn_t osdf;
aom_obmc_variance_fn_t ovf;
aom_obmc_subpixvariance_fn_t osvf;
#if CONFIG_JNT_COMP
aom_jnt_sad_avg_fn_t jsdaf;
aom_jnt_subp_avg_variance_fn_t jsvaf;
#endif // CONFIG_JNT_COMP
} aom_variance_fn_ptr_t;
#endif // CONFIG_AV1
......
......@@ -625,6 +625,13 @@ typedef struct cfl_ctx {
} CFL_CTX;
#endif // CONFIG_CFL
#if CONFIG_JNT_COMP
// Weights for jointly-weighted compound prediction (CONFIG_JNT_COMP).
// fwd_offset scales the forward reference and bck_offset the backward one
// (see av1_jnt_comp_weight_assign); both are set to -1 as a sentinel when
// mbmi->compound_idx selects the plain compound average.
typedef struct jnt_comp_params {
int fwd_offset;   // weight applied to the forward-reference prediction
int bck_offset;   // weight applied to the backward-reference prediction
} JNT_COMP_PARAMS;
#endif // CONFIG_JNT_COMP
typedef struct macroblockd {
struct macroblockd_plane plane[MAX_MB_PLANE];
uint8_t bmode_blocks_wl;
......@@ -745,6 +752,10 @@ typedef struct macroblockd {
#if CONFIG_CFL
CFL_CTX *cfl;
#endif
#if CONFIG_JNT_COMP
JNT_COMP_PARAMS jcp_param;
#endif
} MACROBLOCKD;
static INLINE int get_bitdepth_data_path_index(const MACROBLOCKD *xd) {
......
......@@ -929,10 +929,11 @@ typedef struct SubpelParams {
} SubpelParams;
#if CONFIG_JNT_COMP
static void jnt_comp_weight_assign(const AV1_COMMON *cm,
const MB_MODE_INFO *mbmi,
ConvolveParams *conv_params,
int is_compound) {
void av1_jnt_comp_weight_assign(const AV1_COMMON *cm, const MB_MODE_INFO *mbmi,
int order_idx, int *fwd_offset, int *bck_offset,
int is_compound) {
assert(fwd_offset != NULL && bck_offset != NULL);
if (is_compound) {
int bck_idx = cm->frame_refs[mbmi->ref_frame[0] - LAST_FRAME].idx;
int fwd_idx = cm->frame_refs[mbmi->ref_frame[1] - LAST_FRAME].idx;
......@@ -947,8 +948,8 @@ static void jnt_comp_weight_assign(const AV1_COMMON *cm,
fwd_frame_index = cm->buffer_pool->frame_bufs[fwd_idx].cur_frame_offset;
}
conv_params->bck_offset = abs(cur_frame_index - bck_frame_index);
conv_params->fwd_offset = abs(fwd_frame_index - cur_frame_index);
*bck_offset = abs(cur_frame_index - bck_frame_index);
*fwd_offset = abs(fwd_frame_index - cur_frame_index);
const double fwd = abs(fwd_frame_index - cur_frame_index);
const double bck = abs(cur_frame_index - bck_frame_index);
......@@ -967,22 +968,21 @@ static void jnt_comp_weight_assign(const AV1_COMMON *cm,
for (quant_dist_idx = 0; quant_dist_idx < 4; ++quant_dist_idx) {
if (ratio < quant_dist_category[quant_dist_idx]) break;
}
conv_params->fwd_offset =
quant_dist_lookup_table[0][quant_dist_idx][order];
conv_params->bck_offset =
quant_dist_lookup_table[0][quant_dist_idx][1 - order];
*fwd_offset = quant_dist_lookup_table[order_idx][quant_dist_idx][order];
*bck_offset =
quant_dist_lookup_table[order_idx][quant_dist_idx][1 - order];
} else {
conv_params->fwd_offset = (DIST_PRECISION >> 1);
conv_params->bck_offset = (DIST_PRECISION >> 1);
*fwd_offset = (DIST_PRECISION >> 1);
*bck_offset = (DIST_PRECISION >> 1);
}
if (mbmi->compound_idx) {
conv_params->fwd_offset = -1;
conv_params->bck_offset = -1;
*fwd_offset = -1;
*bck_offset = -1;
}
} else {
conv_params->bck_offset = -1;
conv_params->fwd_offset = -1;
*bck_offset = -1;
*fwd_offset = -1;
}
}
#endif // CONFIG_JNT_COMP
......@@ -1288,7 +1288,8 @@ static INLINE void build_inter_predictors(const AV1_COMMON *cm, MACROBLOCKD *xd,
ConvolveParams conv_params =
get_conv_params_no_round(ref, ref, plane, tmp_dst, MAX_SB_SIZE);
#if CONFIG_JNT_COMP
jnt_comp_weight_assign(cm, &mi->mbmi, &conv_params, is_compound);
av1_jnt_comp_weight_assign(cm, &mi->mbmi, 0, &conv_params.fwd_offset,
&conv_params.bck_offset, is_compound);
#endif // CONFIG_JNT_COMP
#else
......
......@@ -560,6 +560,12 @@ void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize,
uint8_t *ext_dst1[3],
int ext_dst_stride1[3]);
#if CONFIG_JNT_COMP
void av1_jnt_comp_weight_assign(const AV1_COMMON *cm, const MB_MODE_INFO *mbmi,
int order_idx, int *fwd_offset, int *bck_offset,
int is_compound);
#endif // CONFIG_JNT_COMP
#ifdef __cplusplus
} // extern "C"
#endif
......
This diff is collapsed.
This diff is collapsed.
......@@ -5789,56 +5789,6 @@ static int check_best_zero_mv(
return 1;
}
#if CONFIG_JNT_COMP
// (Removed by this commit.) Encoder-local copy of the jnt_comp weight
// computation that communicated the two weights by writing them one past
// the end of the 64x64 (4096-byte) second_pred buffer, at indices 4096 and
// 4097 — the "hack on second_pred" the commit message cites as the cause of
// the Issue 944 seg fault. Callers now use av1_jnt_comp_weight_assign with
// xd->jcp_param instead.
static void jnt_comp_weight_assign(const AV1_COMMON *cm,
const MB_MODE_INFO *mbmi, int order_idx,
uint8_t *second_pred) {
if (mbmi->compound_idx) {
// NOTE(review): second_pred is uint8_t, so these -1 sentinels are
// actually stored as 255.
second_pred[4096] = -1;
second_pred[4097] = -1;
} else {
// Distances of the backward/forward references from the current frame.
int bck_idx = cm->frame_refs[mbmi->ref_frame[0] - LAST_FRAME].idx;
int fwd_idx = cm->frame_refs[mbmi->ref_frame[1] - LAST_FRAME].idx;
int bck_frame_index = 0, fwd_frame_index = 0;
int cur_frame_index = cm->cur_frame->cur_frame_offset;
if (bck_idx >= 0) {
bck_frame_index = cm->buffer_pool->frame_bufs[bck_idx].cur_frame_offset;
}
if (fwd_idx >= 0) {
fwd_frame_index = cm->buffer_pool->frame_bufs[fwd_idx].cur_frame_offset;
}
const double fwd = abs(fwd_frame_index - cur_frame_index);
const double bck = abs(cur_frame_index - bck_frame_index);
int order;
double ratio;
if (COMPOUND_WEIGHT_MODE == DIST) {
// Larger-over-smaller distance ratio; 5.0 stands in for "infinite"
// when the smaller distance is zero.
if (fwd > bck) {
ratio = (bck != 0) ? fwd / bck : 5.0;
order = 0;
} else {
ratio = (fwd != 0) ? bck / fwd : 5.0;
order = 1;
}
// Quantize the ratio into one of four distance categories.
int quant_dist_idx;
for (quant_dist_idx = 0; quant_dist_idx < 4; ++quant_dist_idx) {
if (ratio < quant_dist_category[quant_dist_idx]) break;
}
second_pred[4096] =
quant_dist_lookup_table[order_idx][quant_dist_idx][order];
second_pred[4097] =
quant_dist_lookup_table[order_idx][quant_dist_idx][1 - order];
} else {
// Non-distance mode: equal weights.
second_pred[4096] = (DIST_PRECISION >> 1);
second_pred[4097] = (DIST_PRECISION >> 1);
}
}
}
#endif // CONFIG_JNT_COMP
static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
BLOCK_SIZE bsize, int_mv *frame_mv,
#if CONFIG_COMPOUND_SINGLEREF
......@@ -5901,13 +5851,8 @@ static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
// Prediction buffer from second frame.
#if CONFIG_HIGHBITDEPTH
#if CONFIG_JNT_COMP
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE + 2]);
uint8_t *second_pred;
#else
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
uint8_t *second_pred;
#endif // CONFIG_JNT_COMP
#else // CONFIG_HIGHBITDEPTH
DECLARE_ALIGNED(16, uint8_t, second_pred[MAX_SB_SQUARE]);
#endif // CONFIG_HIGHBITDEPTH
......@@ -6046,7 +5991,8 @@ static void joint_motion_search(const AV1_COMP *cpi, MACROBLOCK *x,
#if CONFIG_JNT_COMP
const int order_idx = id != 0;
jnt_comp_weight_assign(cm, mbmi, order_idx, second_pred);
av1_jnt_comp_weight_assign(cm, mbmi, order_idx, &xd->jcp_param.fwd_offset,
&xd->jcp_param.bck_offset, 1);
#endif // CONFIG_JNT_COMP
// Do compound motion search on the current reference frame.
......@@ -6761,7 +6707,8 @@ static void build_second_inter_pred(const AV1_COMP *cpi, MACROBLOCK *x,
#endif // CONFIG_HIGHBITDEPTH
#if CONFIG_JNT_COMP
jnt_comp_weight_assign(cm, mbmi, 0, second_pred);
av1_jnt_comp_weight_assign(cm, mbmi, 0, &xd->jcp_param.fwd_offset,
&xd->jcp_param.bck_offset, 1);
#endif // CONFIG_JNT_COMP
if (scaled_ref_frame) {
......@@ -6930,11 +6877,7 @@ static void compound_single_motion_search_interinter(
// Prediction buffer from second frame.
#if CONFIG_HIGHBITDEPTH
#if CONFIG_JNT_COMP
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE + 2]);
#else
DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[MAX_SB_SQUARE]);
#endif // CONFIG_JNT_COMP
uint8_t *second_pred;
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment