Commit f78e5a04 authored by Jim Bankoski, committed by John Koleszar

fix denoiser for temporal patterns and rd

This extends the denoiser to work for temporally scalable
coding.

I believe this also fixes a very rare but really bad bug in the original
implementation.

Change-Id: I8b3593a8c54b86eb76f785af1970935f7d56262a
parent 5715c39c
...@@ -216,12 +216,6 @@ typedef struct macroblockd ...@@ -216,12 +216,6 @@ typedef struct macroblockd
MODE_INFO *mode_info_context; MODE_INFO *mode_info_context;
int mode_info_stride; int mode_info_stride;
#if CONFIG_TEMPORAL_DENOISING
MB_PREDICTION_MODE best_sse_inter_mode;
int_mv best_sse_mv;
unsigned char need_to_clamp_best_mvs;
#endif
FRAME_TYPE frame_type; FRAME_TYPE frame_type;
int up_available; int up_available;
......
...@@ -119,6 +119,16 @@ typedef struct macroblock ...@@ -119,6 +119,16 @@ typedef struct macroblock
int optimize; int optimize;
int q_index; int q_index;
#if CONFIG_TEMPORAL_DENOISING
MB_PREDICTION_MODE best_sse_inter_mode;
int_mv best_sse_mv;
MV_REFERENCE_FRAME best_reference_frame;
MV_REFERENCE_FRAME best_zeromv_reference_frame;
unsigned char need_to_clamp_best_mvs;
#endif
void (*short_fdct4x4)(short *input, short *output, int pitch); void (*short_fdct4x4)(short *input, short *output, int pitch);
void (*short_fdct8x4)(short *input, short *output, int pitch); void (*short_fdct8x4)(short *input, short *output, int pitch);
void (*short_walsh4x4)(short *input, short *output, int pitch); void (*short_walsh4x4)(short *input, short *output, int pitch);
......
...@@ -22,68 +22,6 @@ static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20; ...@@ -22,68 +22,6 @@ static const unsigned int SSE_DIFF_THRESHOLD = 16 * 16 * 20;
static const unsigned int SSE_THRESHOLD = 16 * 16 * 40; static const unsigned int SSE_THRESHOLD = 16 * 16 * 40;
static unsigned int denoiser_motion_compensate(YV12_BUFFER_CONFIG *src,
YV12_BUFFER_CONFIG *dst,
MACROBLOCK *x,
unsigned int best_sse,
unsigned int zero_mv_sse,
int recon_yoffset,
int recon_uvoffset)
{
MACROBLOCKD filter_xd = x->e_mbd;
int mv_col;
int mv_row;
int sse_diff = zero_mv_sse - best_sse;
// Compensate the running average.
filter_xd.pre.y_buffer = src->y_buffer + recon_yoffset;
filter_xd.pre.u_buffer = src->u_buffer + recon_uvoffset;
filter_xd.pre.v_buffer = src->v_buffer + recon_uvoffset;
// Write the compensated running average to the destination buffer.
filter_xd.dst.y_buffer = dst->y_buffer + recon_yoffset;
filter_xd.dst.u_buffer = dst->u_buffer + recon_uvoffset;
filter_xd.dst.v_buffer = dst->v_buffer + recon_uvoffset;
// Use the best MV for the compensation.
filter_xd.mode_info_context->mbmi.ref_frame = LAST_FRAME;
filter_xd.mode_info_context->mbmi.mode = filter_xd.best_sse_inter_mode;
filter_xd.mode_info_context->mbmi.mv = filter_xd.best_sse_mv;
filter_xd.mode_info_context->mbmi.need_to_clamp_mvs =
filter_xd.need_to_clamp_best_mvs;
mv_col = filter_xd.best_sse_mv.as_mv.col;
mv_row = filter_xd.best_sse_mv.as_mv.row;
if (filter_xd.mode_info_context->mbmi.mode <= B_PRED ||
(mv_row *mv_row + mv_col *mv_col <= NOISE_MOTION_THRESHOLD &&
sse_diff < SSE_DIFF_THRESHOLD))
{
// Handle intra blocks as referring to last frame with zero motion and
// let the absolute pixel difference affect the filter factor.
// Also consider a small amount of motion as being a random walk due to
// noise, as long as it does not lead to a much bigger error.
// Note that any changes to the mode info only affects the denoising.
filter_xd.mode_info_context->mbmi.ref_frame = LAST_FRAME;
filter_xd.mode_info_context->mbmi.mode = ZEROMV;
filter_xd.mode_info_context->mbmi.mv.as_int = 0;
x->e_mbd.best_sse_inter_mode = ZEROMV;
x->e_mbd.best_sse_mv.as_int = 0;
best_sse = zero_mv_sse;
}
if (!x->skip)
{
vp8_build_inter_predictors_mb(&filter_xd);
}
else
{
vp8_build_inter16x16_predictors_mb(&filter_xd,
filter_xd.dst.y_buffer,
filter_xd.dst.u_buffer,
filter_xd.dst.v_buffer,
filter_xd.dst.y_stride,
filter_xd.dst.uv_stride);
}
return best_sse;
}
// The filtering coefficients used for denoising are adjusted for static // blocks, or blocks with very small motion vectors. This is done through
// blocks, or blocks with very small motion vectors. This is done through // blocks, or blocks with very small motion vectors. This is done through
...@@ -216,27 +154,34 @@ void vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg, ...@@ -216,27 +154,34 @@ void vp8_denoiser_filter_c(YV12_BUFFER_CONFIG *mc_running_avg,
int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height) int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height)
{ {
int i;
assert(denoiser); assert(denoiser);
denoiser->yv12_running_avg.flags = 0;
if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg), width, /* we don't need one for the intra frame, so start at 1 */
height, VP8BORDERINPIXELS) < 0) for (i = 1; i < MAX_REF_FRAMES; i++)
{ {
vp8_denoiser_free(denoiser); denoiser->yv12_running_avg[i].flags = 0;
return 1;
}
if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_running_avg[i]), width,
height, VP8BORDERINPIXELS)
< 0)
{
vp8_denoiser_free(denoiser);
return 1;
}
vpx_memset(denoiser->yv12_running_avg[i].buffer_alloc, 0,
denoiser->yv12_running_avg[i].frame_size);
}
denoiser->yv12_mc_running_avg.flags = 0; denoiser->yv12_mc_running_avg.flags = 0;
if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width, if (vp8_yv12_alloc_frame_buffer(&(denoiser->yv12_mc_running_avg), width,
height, VP8BORDERINPIXELS) < 0) height, VP8BORDERINPIXELS) < 0)
{ {
vp8_denoiser_free(denoiser); vp8_denoiser_free(denoiser);
return 1; return 1;
} }
vpx_memset(denoiser->yv12_running_avg.buffer_alloc, 0,
denoiser->yv12_running_avg.frame_size);
vpx_memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0, vpx_memset(denoiser->yv12_mc_running_avg.buffer_alloc, 0,
denoiser->yv12_mc_running_avg.frame_size); denoiser->yv12_mc_running_avg.frame_size);
return 0; return 0;
...@@ -244,11 +189,18 @@ int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height) ...@@ -244,11 +189,18 @@ int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height)
void vp8_denoiser_free(VP8_DENOISER *denoiser) void vp8_denoiser_free(VP8_DENOISER *denoiser)
{ {
int i;
assert(denoiser); assert(denoiser);
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg);
/* we don't have one for the intra ref frame */
for (i = 1; i < MAX_REF_FRAMES ; i++)
{
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_running_avg[i]);
}
vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_mc_running_avg); vp8_yv12_de_alloc_frame_buffer(&denoiser->yv12_mc_running_avg);
} }
void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
MACROBLOCK *x, MACROBLOCK *x,
unsigned int best_sse, unsigned int best_sse,
...@@ -259,32 +211,103 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser, ...@@ -259,32 +211,103 @@ void vp8_denoiser_denoise_mb(VP8_DENOISER *denoiser,
int mv_row; int mv_row;
int mv_col; int mv_col;
unsigned int motion_magnitude2; unsigned int motion_magnitude2;
MV_REFERENCE_FRAME frame = x->best_reference_frame;
MV_REFERENCE_FRAME zero_frame = x->best_zeromv_reference_frame;
// Motion compensate the running average. // Motion compensate the running average.
best_sse = denoiser_motion_compensate(&denoiser->yv12_running_avg, if(zero_frame)
&denoiser->yv12_mc_running_avg, {
x, YV12_BUFFER_CONFIG *src = &denoiser->yv12_running_avg[frame];
best_sse, YV12_BUFFER_CONFIG *dst = &denoiser->yv12_mc_running_avg;
zero_mv_sse, YV12_BUFFER_CONFIG saved_pre,saved_dst;
recon_yoffset, MB_MODE_INFO saved_mbmi;
recon_uvoffset); MACROBLOCKD *filter_xd = &x->e_mbd;
MB_MODE_INFO *mbmi = &filter_xd->mode_info_context->mbmi;
mv_row = x->e_mbd.best_sse_mv.as_mv.row; int mv_col;
mv_col = x->e_mbd.best_sse_mv.as_mv.col; int mv_row;
motion_magnitude2 = mv_row * mv_row + mv_col * mv_col; int sse_diff = zero_mv_sse - best_sse;
saved_mbmi = *mbmi;
// Use the best MV for the compensation.
mbmi->ref_frame = x->best_reference_frame;
mbmi->mode = x->best_sse_inter_mode;
mbmi->mv = x->best_sse_mv;
mbmi->need_to_clamp_mvs = x->need_to_clamp_best_mvs;
mv_col = x->best_sse_mv.as_mv.col;
mv_row = x->best_sse_mv.as_mv.row;
if(frame == INTRA_FRAME ||
(mv_row *mv_row + mv_col *mv_col <= NOISE_MOTION_THRESHOLD &&
sse_diff < SSE_DIFF_THRESHOLD))
{
// Handle intra blocks as referring to last frame with zero motion
// and let the absolute pixel difference affect the filter factor.
// Also consider a small amount of motion as being a random walk due to
// noise, as long as it does not lead to a much bigger error.
// Note that any changes to the mode info only affects the denoising.
mbmi->ref_frame =
x->best_zeromv_reference_frame;
src = &denoiser->yv12_running_avg[zero_frame];
mbmi->mode = ZEROMV;
mbmi->mv.as_int = 0;
x->best_sse_inter_mode = ZEROMV;
x->best_sse_mv.as_int = 0;
best_sse = zero_mv_sse;
}
saved_pre = filter_xd->pre;
saved_dst = filter_xd->dst;
if (best_sse > SSE_THRESHOLD || // Compensate the running average.
motion_magnitude2 > 8 * NOISE_MOTION_THRESHOLD) filter_xd->pre.y_buffer = src->y_buffer + recon_yoffset;
filter_xd->pre.u_buffer = src->u_buffer + recon_uvoffset;
filter_xd->pre.v_buffer = src->v_buffer + recon_uvoffset;
// Write the compensated running average to the destination buffer.
filter_xd->dst.y_buffer = dst->y_buffer + recon_yoffset;
filter_xd->dst.u_buffer = dst->u_buffer + recon_uvoffset;
filter_xd->dst.v_buffer = dst->v_buffer + recon_uvoffset;
if (!x->skip)
{
vp8_build_inter_predictors_mb(filter_xd);
}
else
{
vp8_build_inter16x16_predictors_mb(filter_xd,
filter_xd->dst.y_buffer,
filter_xd->dst.u_buffer,
filter_xd->dst.v_buffer,
filter_xd->dst.y_stride,
filter_xd->dst.uv_stride);
}
filter_xd->pre = saved_pre;
filter_xd->dst = saved_dst;
*mbmi = saved_mbmi;
}
mv_row = x->best_sse_mv.as_mv.row;
mv_col = x->best_sse_mv.as_mv.col;
motion_magnitude2 = mv_row * mv_row + mv_col * mv_col;
if (best_sse > SSE_THRESHOLD || motion_magnitude2
> 8 * NOISE_MOTION_THRESHOLD)
{ {
// No filtering of this block since it differs too much from the // No filtering of this block; it differs too much from the predictor,
// predictor, or the motion vector magnitude is considered too big. // or the motion vector magnitude is considered too big.
vp8_copy_mem16x16(x->thismb, 16, vp8_copy_mem16x16(
denoiser->yv12_running_avg.y_buffer + recon_yoffset, x->thismb, 16,
denoiser->yv12_running_avg.y_stride); denoiser->yv12_running_avg[LAST_FRAME].y_buffer + recon_yoffset,
denoiser->yv12_running_avg[LAST_FRAME].y_stride);
return; return;
} }
// Filter. // Filter.
vp8_denoiser_filter(&denoiser->yv12_mc_running_avg, vp8_denoiser_filter(&denoiser->yv12_mc_running_avg,
&denoiser->yv12_running_avg, x, motion_magnitude2, &denoiser->yv12_running_avg[LAST_FRAME], x,
motion_magnitude2,
recon_yoffset, recon_uvoffset); recon_yoffset, recon_uvoffset);
} }
...@@ -17,8 +17,8 @@ ...@@ -17,8 +17,8 @@
typedef struct vp8_denoiser typedef struct vp8_denoiser
{ {
YV12_BUFFER_CONFIG yv12_running_avg; YV12_BUFFER_CONFIG yv12_running_avg[MAX_REF_FRAMES];
YV12_BUFFER_CONFIG yv12_mc_running_avg; YV12_BUFFER_CONFIG yv12_mc_running_avg;
} VP8_DENOISER; } VP8_DENOISER;
int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height); int vp8_denoiser_allocate(VP8_DENOISER *denoiser, int width, int height);
......
...@@ -1177,9 +1177,11 @@ int vp8cx_encode_inter_macroblock ...@@ -1177,9 +1177,11 @@ int vp8cx_encode_inter_macroblock
#if CONFIG_TEMPORAL_DENOISING #if CONFIG_TEMPORAL_DENOISING
// Reset the best sse mode/mv for each macroblock. // Reset the best sse mode/mv for each macroblock.
x->e_mbd.best_sse_inter_mode = 0; x->best_reference_frame = INTRA_FRAME;
x->e_mbd.best_sse_mv.as_int = 0; x->best_zeromv_reference_frame = INTRA_FRAME;
x->e_mbd.need_to_clamp_best_mvs = 0; x->best_sse_inter_mode = 0;
x->best_sse_mv.as_int = 0;
x->need_to_clamp_best_mvs = 0;
#endif #endif
if (cpi->sf.RD) if (cpi->sf.RD)
......
...@@ -3156,9 +3156,49 @@ void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm) ...@@ -3156,9 +3156,49 @@ void vp8_loopfilter_frame(VP8_COMP *cpi, VP8_COMMON *cm)
#if CONFIG_TEMPORAL_DENOISING #if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity) if (cpi->oxcf.noise_sensitivity)
{ {
vp8_yv12_extend_frame_borders(&cpi->denoiser.yv12_running_avg);
/* We shouldn't have to keep multiple copies, since we know in advance
 * which buffer we should start from. For now, to get something up and
 * running, the buffers are simply copied.
 */
if (cm->frame_type == KEY_FRAME)
{
int i;
vp8_yv12_copy_frame(
cpi->Source,
&cpi->denoiser.yv12_running_avg[LAST_FRAME]);
vp8_yv12_extend_frame_borders(
&cpi->denoiser.yv12_running_avg[LAST_FRAME]);
for (i = 2; i < MAX_REF_FRAMES - 1; i++)
vp8_yv12_copy_frame(
cpi->Source,
&cpi->denoiser.yv12_running_avg[i]);
}
else /* For non key frames */
{
vp8_yv12_extend_frame_borders(
&cpi->denoiser.yv12_running_avg[LAST_FRAME]);
if (cm->refresh_alt_ref_frame || cm->copy_buffer_to_arf)
{
vp8_yv12_copy_frame(
&cpi->denoiser.yv12_running_avg[LAST_FRAME],
&cpi->denoiser.yv12_running_avg[ALTREF_FRAME]);
}
if (cm->refresh_golden_frame || cm->copy_buffer_to_gf)
{
vp8_yv12_copy_frame(
&cpi->denoiser.yv12_running_avg[LAST_FRAME],
&cpi->denoiser.yv12_running_avg[GOLDEN_FRAME]);
}
}
} }
#endif #endif
} }
static void encode_frame_to_data_rate static void encode_frame_to_data_rate
......
...@@ -61,7 +61,7 @@ int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, ...@@ -61,7 +61,7 @@ int vp8_skip_fractional_mv_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d,
} }
static int get_inter_mbpred_error(MACROBLOCK *mb, int vp8_get_inter_mbpred_error(MACROBLOCK *mb,
const vp8_variance_fn_ptr_t *vfp, const vp8_variance_fn_ptr_t *vfp,
unsigned int *sse, unsigned int *sse,
int_mv this_mv) int_mv this_mv)
...@@ -486,7 +486,7 @@ static int evaluate_inter_mode(unsigned int* sse, int rate2, int* distortion2, V ...@@ -486,7 +486,7 @@ static int evaluate_inter_mode(unsigned int* sse, int rate2, int* distortion2, V
if((this_mode != NEWMV) || if((this_mode != NEWMV) ||
!(cpi->sf.half_pixel_search) || cpi->common.full_pixel==1) !(cpi->sf.half_pixel_search) || cpi->common.full_pixel==1)
*distortion2 = get_inter_mbpred_error(x, *distortion2 = vp8_get_inter_mbpred_error(x,
&cpi->fn_ptr[BLOCK_16X16], &cpi->fn_ptr[BLOCK_16X16],
sse, mv); sse, mv);
...@@ -523,7 +523,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, ...@@ -523,7 +523,7 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int best_mode_index = 0; int best_mode_index = 0;
unsigned int sse = INT_MAX, best_rd_sse = INT_MAX; unsigned int sse = INT_MAX, best_rd_sse = INT_MAX;
#if CONFIG_TEMPORAL_DENOISING #if CONFIG_TEMPORAL_DENOISING
unsigned int zero_mv_sse = 0, best_sse = INT_MAX; unsigned int zero_mv_sse = INT_MAX, best_sse = INT_MAX;
#endif #endif
int_mv mvp; int_mv mvp;
...@@ -964,25 +964,27 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, ...@@ -964,25 +964,27 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
#if CONFIG_TEMPORAL_DENOISING #if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity) if (cpi->oxcf.noise_sensitivity)
{ {
// Store for later use by denoiser.
if (this_mode == ZEROMV && // Store for later use by denoiser.
x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) if (this_mode == ZEROMV && sse < zero_mv_sse )
{ {
zero_mv_sse = sse; zero_mv_sse = sse;
} x->best_zeromv_reference_frame =
x->e_mbd.mode_info_context->mbmi.ref_frame;
// Store the best NEWMV in x for later use in the denoiser. }
// We are restricted to the LAST_FRAME since the denoiser only keeps
// one filter state. // Store the best NEWMV in x for later use in the denoiser.
if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV && if (x->e_mbd.mode_info_context->mbmi.mode == NEWMV &&
x->e_mbd.mode_info_context->mbmi.ref_frame == LAST_FRAME) sse < best_sse)
{ {
best_sse = sse; best_sse = sse;
x->e_mbd.best_sse_inter_mode = NEWMV; x->best_sse_inter_mode = NEWMV;
x->e_mbd.best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv; x->best_sse_mv = x->e_mbd.mode_info_context->mbmi.mv;
x->e_mbd.need_to_clamp_best_mvs = x->need_to_clamp_best_mvs =
x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs; x->e_mbd.mode_info_context->mbmi.need_to_clamp_mvs;
} x->best_reference_frame =
x->e_mbd.mode_info_context->mbmi.ref_frame;
}
} }
#endif #endif
...@@ -1058,37 +1060,47 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, ...@@ -1058,37 +1060,47 @@ void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
#if CONFIG_TEMPORAL_DENOISING #if CONFIG_TEMPORAL_DENOISING
if (cpi->oxcf.noise_sensitivity) if (cpi->oxcf.noise_sensitivity)
{ {
if (x->e_mbd.best_sse_inter_mode == DC_PRED) { if (x->best_sse_inter_mode == DC_PRED)
// No best MV found. {
x->e_mbd.best_sse_inter_mode = best_mbmode.mode; // No best MV found.
x->e_mbd.best_sse_mv = best_mbmode.mv; x->best_sse_inter_mode = best_mbmode.mode;
x->e_mbd.need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs; x->best_sse_mv = best_mbmode.mv;
best_sse = best_rd_sse; x->need_to_clamp_best_mvs = best_mbmode.need_to_clamp_mvs;
} x->best_reference_frame = best_mbmode.ref_frame;
vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse, best_sse = best_rd_sse;
recon_yoffset, recon_uvoffset); }
vp8_denoiser_denoise_mb(&cpi->denoiser, x, best_sse, zero_mv_sse,
// Reevaluate ZEROMV after denoising. recon_yoffset, recon_uvoffset);
if (best_mbmode.ref_frame == INTRA_FRAME)
{
int this_rd = 0;
rate2 = 0;
distortion2 = 0;
x->e_mbd.mode_info_context->mbmi.ref_frame = LAST_FRAME;
rate2 += x->ref_frame_cost[x->e_mbd.mode_info_context->mbmi.ref_frame];
this_mode = ZEROMV;
rate2 += vp8_cost_mv_ref(this_mode, mdcounts);
x->e_mbd.mode_info_context->mbmi.mode = this_mode;
x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x);
if (this_rd < best_rd || x->skip)
// Reevaluate ZEROMV after denoising.
if (best_mbmode.ref_frame == INTRA_FRAME &&
x->best_zeromv_reference_frame != INTRA_FRAME)
{ {
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi, int this_rd = 0;
sizeof(MB_MODE_INFO)); int this_ref_frame = x->best_zeromv_reference_frame;
rate2 = x->ref_frame_cost[this_ref_frame] +
vp8_cost_mv_ref(ZEROMV, mdcounts);
distortion2 = 0;
// set up the proper prediction buffers for the frame
x->e_mbd.mode_info_context->mbmi.ref_frame = this_ref_frame;
x->e_mbd.pre.y_buffer = plane[this_ref_frame][0];
x->e_mbd.pre.u_buffer = plane[this_ref_frame][1];
x->e_mbd.pre.v_buffer = plane[this_ref_frame][2];
x->e_mbd.mode_info_context->mbmi.mode = ZEROMV;
x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
x->e_mbd.mode_info_context->mbmi.mv.as_int = 0;
this_rd = evaluate_inter_mode(&sse, rate2, &distortion2, cpi, x);
if (this_rd < best_rd)
{
vpx_memcpy(&best_mbmode, &x->e_mbd.mode_info_context->mbmi,
sizeof(MB_MODE_INFO));
}
} }
}
} }
#endif #endif
......
...@@ -20,4 +20,8 @@ extern void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, ...@@ -20,4 +20,8 @@ extern void vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset,
int mb_row, int mb_col); int mb_row, int mb_col);
extern void vp8_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate); extern void vp8_pick_intra_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate);
extern int vp8_get_inter_mbpred_error(MACROBLOCK *mb,
const vp8_variance_fn_ptr_t *vfp,
unsigned int *sse,
int_mv this_mv);
#endif #endif
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "onyx_int.h" #include "onyx_int.h"
#include "modecosts.h" #include "modecosts.h"
#include "encodeintra.h" #include "encodeintra.h"