Commit a3fbcc88 authored by Dmitry Kovalev

Using a single struct to represent scale factors.

Moving back to a single scale_factors struct. We no longer need x_offset_q4 and
y_offset_q4 because both values are calculated locally inside the vp9_scale_mv
function.

Change-Id: I78a2122ba253c428a14558bda0e78ece738d2b5b
parent 40e173ac
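
For context before the hunks: the per-block x_offset_q4/y_offset_q4 fields and the set_scaled_offsets/scale_mv function pointers disappear because the new vp9_scale_mv derives the q4 offsets from the block position on every call. Below is a minimal standalone sketch of that logic, using stand-in types and assuming REF_SCALE_SHIFT = 14 and SUBPEL_BITS = 4 (the real definitions live in vp9_scale.h and vp9_filter.h).

#include <stdio.h>

/* Stand-in constants and types for illustration only. */
#define REF_SCALE_SHIFT 14  /* assumed libvpx value */
#define SUBPEL_BITS 4       /* assumed libvpx value */
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

struct scale_factors { int x_scale_fp, y_scale_fp; };
typedef struct { int row, col; } MV;
typedef struct { int row, col; } MV32;

static int scaled_x(int val, const struct scale_factors *sf) {
  return val * sf->x_scale_fp >> REF_SCALE_SHIFT;
}

static int scaled_y(int val, const struct scale_factors *sf) {
  return val * sf->y_scale_fp >> REF_SCALE_SHIFT;
}

/* Mirrors the new vp9_scale_mv(): the q4 offsets are recomputed from the
 * block position (x, y), so nothing needs to be cached per block. */
static MV32 scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
  const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
                     scaled_x(mv->col, sf) + x_off_q4 };
  return res;
}

int main(void) {
  /* Reference frame twice the size of the current frame: factor 2.0 in Q14. */
  const struct scale_factors sf = { 2 << REF_SCALE_SHIFT, 2 << REF_SCALE_SHIFT };
  const MV mv_q4 = { 12, -20 };                   /* motion vector in q4 units */
  const MV32 out = scale_mv(&mv_q4, 16, 8, &sf);  /* block at (x, y) = (16, 8) */
  printf("scaled mv: row=%d col=%d, subpel y=%d x=%d\n",
         out.row, out.col, out.row & SUBPEL_MASK, out.col & SUBPEL_MASK);
  return 0;
}

Callers now pass the block position straight into the call, for example scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf) in build_inter_predictors below, instead of first calling set_scaled_offsets and then scale_mv through the old function pointers.
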
@@ -215,7 +215,7 @@ struct macroblockd_plane {
 typedef struct macroblockd {
   struct macroblockd_plane plane[MAX_MB_PLANE];
-  struct scale_factors scale_factor[2];
+  const struct scale_factors *scale_factors[2];
   MODE_INFO *last_mi;
   int mode_info_stride;
...
@@ -123,7 +123,6 @@ typedef struct VP9Common {
   // Each frame can reference REFS_PER_FRAME buffers
   int active_ref_idx[REFS_PER_FRAME];
   struct scale_factors active_ref_scale[REFS_PER_FRAME];
-  struct scale_factors_common active_ref_scale_comm[REFS_PER_FRAME];
   int new_fb_idx;
   YV12_BUFFER_CONFIG post_proc_buffer;
...
@@ -66,11 +66,11 @@ static void inter_predictor(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride,
                             const int subpel_x,
                             const int subpel_y,
-                            const struct scale_factors *scale,
+                            const struct scale_factors *sf,
                             int w, int h, int ref,
                             const struct subpix_fn_table *subpix,
                             int xs, int ys) {
-  scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref](
+  sf->predict[subpel_x != 0][subpel_y != 0][ref](
       src, src_stride, dst, dst_stride,
       subpix->filter_x[subpel_x], xs,
       subpix->filter_y[subpel_y], ys,
@@ -80,7 +80,7 @@ static void inter_predictor(const uint8_t *src, int src_stride,
 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const MV *src_mv,
-                               struct scale_factors *scale,
+                               const struct scale_factors *sf,
                                int w, int h, int ref,
                                const struct subpix_fn_table *subpix,
                                enum mv_precision precision,
@@ -88,19 +88,14 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  const struct scale_factors_common *sfc = scale->sfc;
-  int subpel_x, subpel_y;
-  MV32 mv;
-  sfc->set_scaled_offsets(scale, y, x);
-  mv = sfc->scale_mv(&mv_q4, scale);
-  subpel_x = mv.col & SUBPEL_MASK;
-  subpel_y = mv.row & SUBPEL_MASK;
+  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
+  const int subpel_x = mv.col & SUBPEL_MASK;
+  const int subpel_y = mv.row & SUBPEL_MASK;
   src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
   inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
-                  scale, w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4);
+                  sf, w, h, ref, subpix, sf->x_step_q4, sf->y_step_q4);
 }
 static INLINE int round_mv_comp_q4(int value) {
@@ -158,7 +153,7 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
   int ref;
   for (ref = 0; ref < 1 + is_compound; ++ref) {
-    struct scale_factors *const scale = &xd->scale_factor[ref];
+    const struct scale_factors *const sf = xd->scale_factors[ref];
     struct buf_2d *const pre_buf = &pd->pre[ref];
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
@@ -185,12 +180,11 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     MV32 scaled_mv;
     int xs, ys, subpel_x, subpel_y;
-    if (vp9_is_scaled(scale->sfc)) {
-      pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, scale);
-      scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x);
-      scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
-      xs = scale->sfc->x_step_q4;
-      ys = scale->sfc->y_step_q4;
+    if (vp9_is_scaled(sf)) {
+      pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
+      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      xs = sf->x_step_q4;
+      ys = sf->y_step_q4;
     } else {
       pre = pre_buf->buf + (y * pre_buf->stride + x);
       scaled_mv.row = mv_q4.row;
@@ -203,7 +197,7 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                              + (scaled_mv.col >> SUBPEL_BITS);
     inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                    subpel_x, subpel_y, scale, w, h, ref, &xd->subpix, xs, ys);
+                    subpel_x, subpel_y, sf, w, h, ref, &xd->subpix, xs, ys);
   }
 }
@@ -262,7 +256,7 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
   int ref;
   for (ref = 0; ref < 1 + is_compound; ++ref) {
-    struct scale_factors *const scale = &xd->scale_factor[ref];
+    const struct scale_factors *const sf = xd->scale_factors[ref];
     struct buf_2d *const pre_buf = &pd->pre[ref];
     struct buf_2d *const dst_buf = &pd->dst;
     uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
@@ -310,16 +304,15 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     x0_16 = x0 << SUBPEL_BITS;
     y0_16 = y0 << SUBPEL_BITS;
-    if (vp9_is_scaled(scale->sfc)) {
-      scale->sfc->set_scaled_offsets(scale, mi_y + y, mi_x + x);
-      scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
-      xs = scale->sfc->x_step_q4;
-      ys = scale->sfc->y_step_q4;
+    if (vp9_is_scaled(sf)) {
+      scaled_mv = vp9_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      xs = sf->x_step_q4;
+      ys = sf->y_step_q4;
       // Get block position in the scaled reference frame.
-      x0 = scale->sfc->scale_value_x(x0, scale->sfc);
-      y0 = scale->sfc->scale_value_y(y0, scale->sfc);
-      x0_16 = scale->sfc->scale_value_x(x0_16, scale->sfc);
-      y0_16 = scale->sfc->scale_value_y(y0_16, scale->sfc);
+      x0 = sf->scale_value_x(x0, sf);
+      y0 = sf->scale_value_y(y0, sf);
+      x0_16 = sf->scale_value_x(x0_16, sf);
+      y0_16 = sf->scale_value_y(y0_16, sf);
     } else {
       scaled_mv.row = mv_q4.row;
       scaled_mv.col = mv_q4.col;
@@ -367,7 +360,7 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
     }
     inter_predictor(buf_ptr, pre_buf->stride, dst, dst_buf->stride, subpel_x,
-                    subpel_y, scale, w, h, ref, &xd->subpix, xs, ys);
+                    subpel_y, sf, w, h, ref, &xd->subpix, xs, ys);
   }
 }
@@ -402,15 +395,9 @@ void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
   const int ref = cm->active_ref_idx[i];
   struct scale_factors *const sf = &cm->active_ref_scale[i];
-  struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i];
-  if (ref >= cm->fb_count) {
-    vp9_zero(*sf);
-    vp9_zero(*sfc);
-  } else {
-    YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
-    vp9_setup_scale_factors_for_frame(sf, sfc,
-                                      fb->y_crop_width, fb->y_crop_height,
-                                      cm->width, cm->height);
-  }
+  YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
+  vp9_setup_scale_factors_for_frame(sf,
+                                    fb->y_crop_width, fb->y_crop_height,
+                                    cm->width, cm->height);
 }
@@ -30,18 +30,16 @@ void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const MV *mv_q3,
-                               struct scale_factors *scale,
+                               const struct scale_factors *sf,
                                int w, int h, int do_avg,
                                const struct subpix_fn_table *subpix,
                                enum mv_precision precision,
                                int x, int y);
 static int scaled_buffer_offset(int x_offset, int y_offset, int stride,
-                                const struct scale_factors *scale) {
-  const int x = scale ? scale->sfc->scale_value_x(x_offset, scale->sfc) :
-                        x_offset;
-  const int y = scale ? scale->sfc->scale_value_y(y_offset, scale->sfc) :
-                        y_offset;
+                                const struct scale_factors *sf) {
+  const int x = sf ? sf->scale_value_x(x_offset, sf) : x_offset;
+  const int y = sf ? sf->scale_value_y(y_offset, sf) : y_offset;
   return y * stride + x;
 }
@@ -92,10 +90,10 @@ static void setup_pre_planes(MACROBLOCKD *xd, int i,
   }
 }
-static void set_scale_factors(MACROBLOCKD *xd, int ref0, int ref1,
-                              struct scale_factors sf[MAX_REF_FRAMES]) {
-  xd->scale_factor[0] = sf[ref0 >= 0 ? ref0 : 0];
-  xd->scale_factor[1] = sf[ref1 >= 0 ? ref1 : 0];
+static void set_scale_factors(VP9_COMMON *cm, MACROBLOCKD *xd,
+                              int ref0, int ref1) {
+  xd->scale_factors[0] = &cm->active_ref_scale[ref0 >= 0 ? ref0 : 0];
+  xd->scale_factors[1] = &cm->active_ref_scale[ref1 >= 0 ? ref1 : 0];
 }
 void vp9_setup_scale_factors(VP9_COMMON *cm, int i);
...
@@ -12,47 +12,19 @@
 #include "vp9/common/vp9_filter.h"
 #include "vp9/common/vp9_scale.h"
-static INLINE int scaled_x(int val, const struct scale_factors_common *sfc) {
-  return val * sfc->x_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_x(int val, const struct scale_factors *sf) {
+  return val * sf->x_scale_fp >> REF_SCALE_SHIFT;
 }
-static INLINE int scaled_y(int val, const struct scale_factors_common *sfc) {
-  return val * sfc->y_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_y(int val, const struct scale_factors *sf) {
+  return val * sf->y_scale_fp >> REF_SCALE_SHIFT;
 }
-static int unscaled_value(int val, const struct scale_factors_common *sfc) {
-  (void) sfc;
+static int unscaled_value(int val, const struct scale_factors *sf) {
+  (void) sf;
   return val;
 }
-static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) {
-  const MV32 res = {
-    scaled_y(mv->row, scale->sfc) + scale->y_offset_q4,
-    scaled_x(mv->col, scale->sfc) + scale->x_offset_q4
-  };
-  return res;
-}
-static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) {
-  const MV32 res = {
-    mv->row,
-    mv->col
-  };
-  return res;
-}
-static void set_offsets_with_scaling(struct scale_factors *scale,
-                                     int row, int col) {
-  scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
-  scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
-}
-static void set_offsets_without_scaling(struct scale_factors *scale,
-                                        int row, int col) {
-  scale->x_offset_q4 = 0;
-  scale->y_offset_q4 = 0;
-}
 static int get_fixed_point_scale_factor(int other_size, int this_size) {
   // Calculate scaling factor once for each reference frame
   // and use fixed point scaling factors in decoding and encoding routines.
@@ -69,31 +41,36 @@ static int check_scale_factors(int other_w, int other_h,
          this_h <= 16 * other_h;
 }
-void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
-                                       struct scale_factors_common *scale_comm,
+MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+  const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
+  const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
+  const MV32 res = {
+    scaled_y(mv->row, sf) + y_off_q4,
+    scaled_x(mv->col, sf) + x_off_q4
+  };
+  return res;
+}
+void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
                                        int other_w, int other_h,
                                        int this_w, int this_h) {
   if (!check_scale_factors(other_w, other_h, this_w, this_h)) {
-    scale_comm->x_scale_fp = REF_INVALID_SCALE;
-    scale_comm->y_scale_fp = REF_INVALID_SCALE;
+    sf->x_scale_fp = REF_INVALID_SCALE;
+    sf->y_scale_fp = REF_INVALID_SCALE;
     return;
   }
-  scale_comm->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
-  scale_comm->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
-  scale_comm->x_step_q4 = scaled_x(16, scale_comm);
-  scale_comm->y_step_q4 = scaled_y(16, scale_comm);
+  sf->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
+  sf->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
+  sf->x_step_q4 = scaled_x(16, sf);
+  sf->y_step_q4 = scaled_y(16, sf);
-  if (vp9_is_scaled(scale_comm)) {
-    scale_comm->scale_value_x = scaled_x;
-    scale_comm->scale_value_y = scaled_y;
-    scale_comm->set_scaled_offsets = set_offsets_with_scaling;
-    scale_comm->scale_mv = scaled_mv;
+  if (vp9_is_scaled(sf)) {
+    sf->scale_value_x = scaled_x;
+    sf->scale_value_y = scaled_y;
   } else {
-    scale_comm->scale_value_x = unscaled_value;
-    scale_comm->scale_value_y = unscaled_value;
-    scale_comm->set_scaled_offsets = set_offsets_without_scaling;
-    scale_comm->scale_mv = unscaled_mv;
+    sf->scale_value_x = unscaled_value;
+    sf->scale_value_y = unscaled_value;
   }
   // TODO(agrange): Investigate the best choice of functions to use here
@@ -102,48 +79,44 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
   // applied in one direction only, and not at all for 0,0, seems to give the
   // best quality, but it may be worth trying an additional mode that does
   // do the filtering on full-pel.
-  if (scale_comm->x_step_q4 == 16) {
-    if (scale_comm->y_step_q4 == 16) {
+  if (sf->x_step_q4 == 16) {
+    if (sf->y_step_q4 == 16) {
       // No scaling in either direction.
-      scale_comm->predict[0][0][0] = vp9_convolve_copy;
-      scale_comm->predict[0][0][1] = vp9_convolve_avg;
-      scale_comm->predict[0][1][0] = vp9_convolve8_vert;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
+      sf->predict[0][0][0] = vp9_convolve_copy;
+      sf->predict[0][0][1] = vp9_convolve_avg;
+      sf->predict[0][1][0] = vp9_convolve8_vert;
+      sf->predict[0][1][1] = vp9_convolve8_avg_vert;
+      sf->predict[1][0][0] = vp9_convolve8_horiz;
+      sf->predict[1][0][1] = vp9_convolve8_avg_horiz;
     } else {
       // No scaling in x direction. Must always scale in the y direction.
-      scale_comm->predict[0][0][0] = vp9_convolve8_vert;
-      scale_comm->predict[0][0][1] = vp9_convolve8_avg_vert;
-      scale_comm->predict[0][1][0] = vp9_convolve8_vert;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale_comm->predict[1][0][0] = vp9_convolve8;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg;
+      sf->predict[0][0][0] = vp9_convolve8_vert;
+      sf->predict[0][0][1] = vp9_convolve8_avg_vert;
+      sf->predict[0][1][0] = vp9_convolve8_vert;
+      sf->predict[0][1][1] = vp9_convolve8_avg_vert;
+      sf->predict[1][0][0] = vp9_convolve8;
+      sf->predict[1][0][1] = vp9_convolve8_avg;
     }
   } else {
-    if (scale_comm->y_step_q4 == 16) {
+    if (sf->y_step_q4 == 16) {
       // No scaling in the y direction. Must always scale in the x direction.
-      scale_comm->predict[0][0][0] = vp9_convolve8_horiz;
-      scale_comm->predict[0][0][1] = vp9_convolve8_avg_horiz;
-      scale_comm->predict[0][1][0] = vp9_convolve8;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg;
-      scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
+      sf->predict[0][0][0] = vp9_convolve8_horiz;
+      sf->predict[0][0][1] = vp9_convolve8_avg_horiz;
+      sf->predict[0][1][0] = vp9_convolve8;
+      sf->predict[0][1][1] = vp9_convolve8_avg;
+      sf->predict[1][0][0] = vp9_convolve8_horiz;
+      sf->predict[1][0][1] = vp9_convolve8_avg_horiz;
     } else {
       // Must always scale in both directions.
-      scale_comm->predict[0][0][0] = vp9_convolve8;
-      scale_comm->predict[0][0][1] = vp9_convolve8_avg;
-      scale_comm->predict[0][1][0] = vp9_convolve8;
-      scale_comm->predict[0][1][1] = vp9_convolve8_avg;
-      scale_comm->predict[1][0][0] = vp9_convolve8;
-      scale_comm->predict[1][0][1] = vp9_convolve8_avg;
+      sf->predict[0][0][0] = vp9_convolve8;
+      sf->predict[0][0][1] = vp9_convolve8_avg;
+      sf->predict[0][1][0] = vp9_convolve8;
+      sf->predict[0][1][1] = vp9_convolve8_avg;
+      sf->predict[1][0][0] = vp9_convolve8;
+      sf->predict[1][0][1] = vp9_convolve8_avg;
     }
   }
   // 2D subpel motion always gets filtered in both directions
-  scale_comm->predict[1][1][0] = vp9_convolve8;
-  scale_comm->predict[1][1][1] = vp9_convolve8_avg;
-  scale->sfc = scale_comm;
-  scale->x_offset_q4 = 0;  // calculated per block
-  scale->y_offset_q4 = 0;  // calculated per block
+  sf->predict[1][1][0] = vp9_convolve8;
+  sf->predict[1][1][1] = vp9_convolve8_avg;
 }
@@ -18,40 +18,32 @@
 #define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
 #define REF_INVALID_SCALE -1
-struct scale_factors;
-struct scale_factors_common {
+struct scale_factors {
   int x_scale_fp;  // horizontal fixed point scale factor
   int y_scale_fp;  // vertical fixed point scale factor
   int x_step_q4;
   int y_step_q4;
-  int (*scale_value_x)(int val, const struct scale_factors_common *sfc);
-  int (*scale_value_y)(int val, const struct scale_factors_common *sfc);
-  void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
-  MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale);
+  int (*scale_value_x)(int val, const struct scale_factors *sf);
+  int (*scale_value_y)(int val, const struct scale_factors *sf);
   convolve_fn_t predict[2][2][2];  // horiz, vert, avg
 };
-struct scale_factors {
-  int x_offset_q4;
-  int y_offset_q4;
-  const struct scale_factors_common *sfc;
-};
+MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
-void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
-                                       struct scale_factors_common *scale_comm,
+void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
                                        int other_w, int other_h,
                                        int this_w, int this_h);
-static int vp9_is_valid_scale(const struct scale_factors_common *sfc) {
-  return sfc->x_scale_fp != REF_INVALID_SCALE &&
-         sfc->y_scale_fp != REF_INVALID_SCALE;
+static int vp9_is_valid_scale(const struct scale_factors *sf) {
+  return sf->x_scale_fp != REF_INVALID_SCALE &&
+         sf->y_scale_fp != REF_INVALID_SCALE;
 }
-static int vp9_is_scaled(const struct scale_factors_common *sfc) {
-  return sfc->x_scale_fp != REF_NO_SCALE ||
-         sfc->y_scale_fp != REF_NO_SCALE;
+static int vp9_is_scaled(const struct scale_factors *sf) {
+  return sf->x_scale_fp != REF_NO_SCALE ||
+         sf->y_scale_fp != REF_NO_SCALE;
 }
 #endif  // VP9_COMMON_VP9_SCALE_H_
@@ -380,15 +380,15 @@ static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
   MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
   const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
   const YV12_BUFFER_CONFIG *cfg = get_frame_ref_buffer(cm, ref);
-  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];
+  const struct scale_factors *sf = &cm->active_ref_scale[ref];
   xd->ref_buf[idx] = cfg;
-  if (!vp9_is_valid_scale(sfc))
+  if (!vp9_is_valid_scale(sf))
     vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                        "Invalid scale factors");
-  xd->scale_factor[idx].sfc = sfc;
-  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
+  xd->scale_factors[idx] = sf;
+  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, xd->scale_factors[idx]);
   xd->corrupted |= cfg->corrupted;
 }
@@ -1201,7 +1201,7 @@ static size_t read_uncompressed_header(VP9D_COMP *pbi,
   for (i = 0; i < REFS_PER_FRAME; ++i) {
     vp9_setup_scale_factors(cm, i);