Commit 0d3c3d3c authored by Deb Mukherjee

Adds high bitdepth convolve, interpred & scaling

Change-Id: Ie51c352a6b250547207cbc1ebba833a01ed053e3
parent d3a7e677

@@ -282,3 +282,280 @@ void vp9_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride,
    dst += dst_stride;
  }
}

#if CONFIG_VP9_HIGHBITDEPTH
static void high_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                uint8_t *dst8, ptrdiff_t dst_stride,
                                const InterpKernel *x_filters,
                                int x0_q4, int x_step_q4,
                                int w, int h, int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= SUBPEL_TAPS / 2 - 1;
  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_x[k] * x_filter[k];
      dst[x] = clip_pixel_high(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}
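
The horizontal pass walks the source in q4 fixed point, i.e. sixteenths of a pixel: the high bits of x_q4 select the source pixel and the low four bits select one of the 16 sub-pel kernels. A minimal standalone sketch of that walk (not part of the patch; assumes SUBPEL_BITS == 4 and uses x_step_q4 == 20 to model a 16:20 downscale):

#include <stdio.h>

#define SUBPEL_BITS 4
#define SUBPEL_MASK ((1 << SUBPEL_BITS) - 1)

int main(void) {
  int x, x_q4 = 0;
  for (x = 0; x < 4; ++x) {
    /* Integer source pixel and sub-pel kernel index for output sample x. */
    printf("x=%d -> pixel %d, kernel %d\n",
           x, x_q4 >> SUBPEL_BITS, x_q4 & SUBPEL_MASK);
    x_q4 += 20;  /* source advances 20/16 pel per output sample */
  }
  return 0;  /* prints pixels 0, 1, 2, 3 and kernels 0, 4, 8, 12 */
}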

static void high_convolve_avg_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                    uint8_t *dst8, ptrdiff_t dst_stride,
                                    const InterpKernel *x_filters,
                                    int x0_q4, int x_step_q4,
                                    int w, int h, int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= SUBPEL_TAPS / 2 - 1;
  for (y = 0; y < h; ++y) {
    int x_q4 = x0_q4;
    for (x = 0; x < w; ++x) {
      const uint16_t *const src_x = &src[x_q4 >> SUBPEL_BITS];
      const int16_t *const x_filter = x_filters[x_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_x[k] * x_filter[k];
      dst[x] = ROUND_POWER_OF_TWO(dst[x] +
          clip_pixel_high(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1);
      x_q4 += x_step_q4;
    }
    src += src_stride;
    dst += dst_stride;
  }
}
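
The averaging variant blends the freshly filtered, clipped sample with the prediction already in dst. ROUND_POWER_OF_TWO is the usual libvpx rounding shift, reproduced here for context (worth verifying against the common headers):

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
/* With n == 1 the output is (dst[x] + filtered + 1) >> 1, a rounded mean:
 * e.g. 511 and 514 average to 513. */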

static void high_convolve_vert(const uint8_t *src8, ptrdiff_t src_stride,
                               uint8_t *dst8, ptrdiff_t dst_stride,
                               const InterpKernel *y_filters,
                               int y0_q4, int y_step_q4, int w, int h,
                               int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = clip_pixel_high(
          ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd);
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void high_convolve_avg_vert(const uint8_t *src8, ptrdiff_t src_stride,
                                   uint8_t *dst8, ptrdiff_t dst_stride,
                                   const InterpKernel *y_filters,
                                   int y0_q4, int y_step_q4, int w, int h,
                                   int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  src -= src_stride * (SUBPEL_TAPS / 2 - 1);
  for (x = 0; x < w; ++x) {
    int y_q4 = y0_q4;
    for (y = 0; y < h; ++y) {
      const uint16_t *src_y = &src[(y_q4 >> SUBPEL_BITS) * src_stride];
      const int16_t *const y_filter = y_filters[y_q4 & SUBPEL_MASK];
      int k, sum = 0;
      for (k = 0; k < SUBPEL_TAPS; ++k)
        sum += src_y[k * src_stride] * y_filter[k];
      dst[y * dst_stride] = ROUND_POWER_OF_TWO(dst[y * dst_stride] +
          clip_pixel_high(ROUND_POWER_OF_TWO(sum, FILTER_BITS), bd), 1);
      y_q4 += y_step_q4;
    }
    ++src;
    ++dst;
  }
}

static void high_convolve(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const InterpKernel *const x_filters,
                          int x0_q4, int x_step_q4,
                          const InterpKernel *const y_filters,
                          int y0_q4, int y_step_q4,
                          int w, int h, int bd) {
  // Note: Fixed size intermediate buffer, temp, places limits on parameters.
  // 2d filtering proceeds in 2 steps:
  //   (1) Interpolate horizontally into an intermediate buffer, temp.
  //   (2) Interpolate temp vertically to derive the sub-pixel result.
  // Deriving the maximum number of rows in the temp buffer (135):
  // --Smallest scaling factor is x1/2 ==> y_step_q4 = 32 (Normative).
  // --Largest block size is 64x64 pixels.
  // --64 rows in the downscaled frame span a distance of (64 - 1) * 32 in the
  //   original frame (in 1/16th pixel units).
  // --Must round-up because block may be located at sub-pixel position.
  // --Require an additional SUBPEL_TAPS rows for the 8-tap filter tails.
  // --((64 - 1) * 32 + 15) >> 4 + 8 = 135.
  uint16_t temp[64 * 135];
  int intermediate_height = (((h - 1) * y_step_q4 + 15) >> 4) + SUBPEL_TAPS;

  assert(w <= 64);
  assert(h <= 64);
  assert(y_step_q4 <= 32);
  assert(x_step_q4 <= 32);

  if (intermediate_height < h)
    intermediate_height = h;

  high_convolve_horiz(src - src_stride * (SUBPEL_TAPS / 2 - 1),
                      src_stride, CONVERT_TO_BYTEPTR(temp), 64,
                      x_filters, x0_q4, x_step_q4, w,
                      intermediate_height, bd);
  high_convolve_vert(CONVERT_TO_BYTEPTR(temp) + 64 * (SUBPEL_TAPS / 2 - 1),
                     64, dst, dst_stride, y_filters, y0_q4, y_step_q4,
                     w, h, bd);
}
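
The bound derived in the comment can be sanity-checked directly; in the worst normative case (h == 64, y_step_q4 == 32, 8 taps) the exact maximum works out to 134 rows, one row inside the 64 * 135 temp buffer:

#include <assert.h>

int main(void) {
  const int h = 64, y_step_q4 = 32, taps = 8;  /* worst case per the asserts */
  const int intermediate_height = (((h - 1) * y_step_q4 + 15) >> 4) + taps;
  assert(intermediate_height == 134);  /* fits within the 135-row buffer */
  return 0;
}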

void vp9_high_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                uint8_t *dst, ptrdiff_t dst_stride,
                                const int16_t *filter_x, int x_step_q4,
                                const int16_t *filter_y, int y_step_q4,
                                int w, int h, int bd) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);
  (void)filter_y;
  (void)y_step_q4;

  high_convolve_horiz(src, src_stride, dst, dst_stride, filters_x,
                      x0_q4, x_step_q4, w, h, bd);
}

void vp9_high_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride,
                                    uint8_t *dst, ptrdiff_t dst_stride,
                                    const int16_t *filter_x, int x_step_q4,
                                    const int16_t *filter_y, int y_step_q4,
                                    int w, int h, int bd) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);
  (void)filter_y;
  (void)y_step_q4;

  high_convolve_avg_horiz(src, src_stride, dst, dst_stride, filters_x,
                          x0_q4, x_step_q4, w, h, bd);
}

void vp9_high_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                               uint8_t *dst, ptrdiff_t dst_stride,
                               const int16_t *filter_x, int x_step_q4,
                               const int16_t *filter_y, int y_step_q4,
                               int w, int h, int bd) {
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);
  (void)filter_x;
  (void)x_step_q4;

  high_convolve_vert(src, src_stride, dst, dst_stride, filters_y,
                     y0_q4, y_step_q4, w, h, bd);
}

void vp9_high_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y, int y_step_q4,
                                   int w, int h, int bd) {
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);
  (void)filter_x;
  (void)x_step_q4;

  high_convolve_avg_vert(src, src_stride, dst, dst_stride, filters_y,
                         y0_q4, y_step_q4, w, h, bd);
}

void vp9_high_convolve8_c(const uint8_t *src, ptrdiff_t src_stride,
                          uint8_t *dst, ptrdiff_t dst_stride,
                          const int16_t *filter_x, int x_step_q4,
                          const int16_t *filter_y, int y_step_q4,
                          int w, int h, int bd) {
  const InterpKernel *const filters_x = get_filter_base(filter_x);
  const int x0_q4 = get_filter_offset(filter_x, filters_x);
  const InterpKernel *const filters_y = get_filter_base(filter_y);
  const int y0_q4 = get_filter_offset(filter_y, filters_y);

  high_convolve(src, src_stride, dst, dst_stride,
                filters_x, x0_q4, x_step_q4,
                filters_y, y0_q4, y_step_q4, w, h, bd);
}

void vp9_high_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride,
                              uint8_t *dst, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int x_step_q4,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h, int bd) {
  // Fixed size intermediate buffer places limits on parameters.
  DECLARE_ALIGNED_ARRAY(16, uint16_t, temp, 64 * 64);
  assert(w <= 64);
  assert(h <= 64);

  vp9_high_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64,
                       filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd);
  vp9_high_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride,
                          NULL, 0, NULL, 0, w, h, bd);
}
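
A hedged usage sketch of the averaging entry point (my_kernels and the helper below are illustrative, not from the patch). The kernel pointers must come from a 256-byte-aligned table of 16 8-tap filters, since get_filter_base() recovers the table base by masking the pointer, and the uint16_t buffers cross the uint8_t-typed API through CONVERT_TO_BYTEPTR:

/* Assumed: a suitably aligned table of 16 8-tap kernels. */
extern const int16_t my_kernels[16][8];

static void predict_avg_16x16_10bit(uint16_t *src, int src_stride,
                                    uint16_t *dst, int dst_stride) {
  vp9_high_convolve8_avg_c(CONVERT_TO_BYTEPTR(src), src_stride,
                           CONVERT_TO_BYTEPTR(dst), dst_stride,
                           my_kernels[4], 16,  /* x: 4/16-pel phase, 1:1 */
                           my_kernels[7], 16,  /* y: 7/16-pel phase, 1:1 */
                           16, 16, 10);        /* 16x16 block, 10-bit */
}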

void vp9_high_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride,
                              uint8_t *dst8, ptrdiff_t dst_stride,
                              const int16_t *filter_x, int filter_x_stride,
                              const int16_t *filter_y, int filter_y_stride,
                              int w, int h, int bd) {
  int r;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  (void)filter_x;
  (void)filter_y;
  (void)filter_x_stride;
  (void)filter_y_stride;
  (void)bd;

  for (r = h; r > 0; --r) {
    vpx_memcpy(dst, src, w * sizeof(uint16_t));
    src += src_stride;
    dst += dst_stride;
  }
}

void vp9_high_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride,
                             uint8_t *dst8, ptrdiff_t dst_stride,
                             const int16_t *filter_x, int filter_x_stride,
                             const int16_t *filter_y, int filter_y_stride,
                             int w, int h, int bd) {
  int x, y;
  uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  (void)filter_x;
  (void)filter_y;
  (void)filter_x_stride;
  (void)filter_y_stride;
  (void)bd;

  for (y = 0; y < h; ++y) {
    for (x = 0; x < w; ++x) {
      dst[x] = ROUND_POWER_OF_TWO(dst[x] + src[x], 1);
    }
    src += src_stride;
    dst += dst_stride;
  }
}
#endif
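
Throughout these functions, uint16_t buffers travel through the existing uint8_t-typed signatures via address-shifting macros. As defined in the libvpx headers (reproduced for context; verify against the tree), the fake byte pointer is the real uint16_t address halved, so stride arithmetic done in the byte domain lands on the right uint16_t element after conversion back:

#define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)(x)) << 1))
#define CONVERT_TO_BYTEPTR(x)  ((uint8_t *)(((uintptr_t)(x)) >> 1))
/* For a uint16_t *p, CONVERT_TO_SHORTPTR(CONVERT_TO_BYTEPTR(p) + n) == p + n:
 * adding n in the byte domain becomes 2n bytes, i.e. n elements, after the
 * shift back. This is why CONVERT_TO_BYTEPTR(temp) + 64 * (SUBPEL_TAPS / 2 - 1)
 * above skips exactly three 64-element rows of the uint16_t temp buffer. */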

@@ -23,6 +23,14 @@ typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                              const int16_t *filter_y, int y_step_q4,
                              int w, int h);

#if CONFIG_VP9_HIGHBITDEPTH
typedef void (*high_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const int16_t *filter_x, int x_step_q4,
                                   const int16_t *filter_y, int y_step_q4,
                                   int w, int h, int bd);
#endif
#ifdef __cplusplus
} // extern "C"
#endif

@@ -63,6 +63,53 @@ static void build_mc_border(const uint8_t *src, int src_stride,
  } while (--b_h);
}

#if CONFIG_VP9_HIGHBITDEPTH
static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                 uint16_t *dst, int dst_stride,
                                 int x, int y, int b_w, int b_h,
                                 int w, int h) {
  // Get a pointer to the start of the real data for this row.
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *ref_row = src - x - y * src_stride;

  if (y >= h)
    ref_row += (h - 1) * src_stride;
  else if (y > 0)
    ref_row += y * src_stride;

  do {
    int right = 0, copy;
    int left = x < 0 ? -x : 0;

    if (left > b_w)
      left = b_w;

    if (x + b_w > w)
      right = x + b_w - w;

    if (right > b_w)
      right = b_w;

    copy = b_w - left - right;

    if (left)
      vpx_memset16(dst, ref_row[0], left);

    if (copy)
      memcpy(dst + left, ref_row + x + left, copy * sizeof(uint16_t));

    if (right)
      vpx_memset16(dst + left + copy, ref_row[w - 1], right);

    dst += dst_stride;
    ++y;

    if (y > 0 && y < h)
      ref_row += src_stride;
  } while (--b_h);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
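
Each border row is assembled as a left run of replicated edge pixels, an in-frame span copied with memcpy, and a right run of replicated pixels. A worked example of the split, mirroring the logic above: an 8-wide fetch starting 3 pixels left of a 4-pixel-wide frame yields rows of the form p0 p0 p0 | p0 p1 p2 p3 | p3:

#include <assert.h>

int main(void) {
  const int x = -3, b_w = 8, w = 4;
  int right = 0, copy;
  int left = x < 0 ? -x : 0;       /* 3 copies of ref_row[0]     */
  if (x + b_w > w)
    right = x + b_w - w;           /* 1 copy of ref_row[w - 1]   */
  copy = b_w - left - right;       /* 4 pixels memcpy'd in-frame */
  assert(left == 3 && copy == 4 && right == 1);
  return 0;
}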

static void inter_predictor(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride,
                            const int subpel_x,
@@ -97,6 +144,42 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                  sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4);
}

#if CONFIG_VP9_HIGHBITDEPTH
static void high_inter_predictor(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride,
                                 const int subpel_x,
                                 const int subpel_y,
                                 const struct scale_factors *sf,
                                 int w, int h, int ref,
                                 const InterpKernel *kernel,
                                 int xs, int ys, int bd) {
  sf->high_predict[subpel_x != 0][subpel_y != 0][ref](
      src, src_stride, dst, dst_stride,
      kernel[subpel_x], xs, kernel[subpel_y], ys, w, h, bd);
}

void vp9_high_build_inter_predictor(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride,
                                    const MV *src_mv,
                                    const struct scale_factors *sf,
                                    int w, int h, int ref,
                                    const InterpKernel *kernel,
                                    enum mv_precision precision,
                                    int x, int y, int bd) {
  const int is_q4 = precision == MV_PRECISION_Q4;
  const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2 };
  MV32 mv = vp9_scale_mv(&mv_q4, x, y, sf);
  const int subpel_x = mv.col & SUBPEL_MASK;
  const int subpel_y = mv.row & SUBPEL_MASK;

  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);

  high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y,
                       sf, w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4,
                       bd);
}
#endif  // CONFIG_VP9_HIGHBITDEPTH
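
The precision handling is a straight doubling: an eighth-pel (Q3) motion vector is promoted to sixteenth-pel (Q4) before scaling, then split into an integer pixel offset and a sub-pel phase. Assuming an identity scale factor (vp9_scale_mv returns its input), a Q3 column component of -3 becomes -6 in Q4, i.e. an integer offset of -1 pixel with phase 10:

#include <assert.h>

int main(void) {
  const int col_q3 = -3;
  const int col_q4 = col_q3 * 2;  /* MV_PRECISION_Q3 -> Q4 */
  assert((col_q4 >> 4) == -1);    /* integer pixel offset (>> SUBPEL_BITS) */
  assert((col_q4 & 15) == 10);    /* sub-pel kernel index (& SUBPEL_MASK)  */
  return 0;
}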

static INLINE int round_mv_comp_q4(int value) {
  return (value < 0 ? value - 2 : value + 2) / 4;
}

@@ -222,9 +305,20 @@ static void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
    pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride
           + (scaled_mv.col >> SUBPEL_BITS);

#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                           subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys,
                           xd->bd);
    } else {
      inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                      subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
    }
#else
    inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
                    subpel_x, subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
}

static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
@@ -393,16 +487,64 @@ static void dec_build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
          y0 < 0 || y0 > frame_height - 1 || y1 < 0 || y1 > frame_height - 1) {
        uint8_t *buf_ptr1 = ref_frame + y0 * pre_buf->stride + x0;
        // Extend the border.
#if CONFIG_VP9_HIGHBITDEPTH
        if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
          high_build_mc_border(buf_ptr1,
                               pre_buf->stride,
                               xd->mc_buf_high,
                               x1 - x0 + 1,
                               x0,
                               y0,
                               x1 - x0 + 1,
                               y1 - y0 + 1,
                               frame_width,
                               frame_height);
          buf_stride = x1 - x0 + 1;
          buf_ptr = CONVERT_TO_BYTEPTR(xd->mc_buf_high) +
              y_pad * 3 * buf_stride + x_pad * 3;
        } else {
          build_mc_border(buf_ptr1,
                          pre_buf->stride,
                          xd->mc_buf,
                          x1 - x0 + 1,
                          x0,
                          y0,
                          x1 - x0 + 1,
                          y1 - y0 + 1,
                          frame_width,
                          frame_height);
          buf_stride = x1 - x0 + 1;
          buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
        }
#else
        build_mc_border(buf_ptr1,
                        pre_buf->stride,
                        xd->mc_buf,
                        x1 - x0 + 1,
                        x0,
                        y0,
                        x1 - x0 + 1,
                        y1 - y0 + 1,
                        frame_width,
                        frame_height);
        buf_stride = x1 - x0 + 1;
        buf_ptr = xd->mc_buf + y_pad * 3 * buf_stride + x_pad * 3;
#endif  // CONFIG_VP9_HIGHBITDEPTH
      }
    }

#if CONFIG_VP9_HIGHBITDEPTH
    if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
      high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                           subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
    } else {
      inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                      subpel_y, sf, w, h, ref, kernel, xs, ys);
    }
#else
    inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                    subpel_y, sf, w, h, ref, kernel, xs, ys);
#endif  // CONFIG_VP9_HIGHBITDEPTH
  }
}

@@ -39,6 +39,17 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                               enum mv_precision precision,
                               int x, int y);

#if CONFIG_VP9_HIGHBITDEPTH
void vp9_high_build_inter_predictor(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride,
                                    const MV *mv_q3,
                                    const struct scale_factors *sf,
                                    int w, int h, int do_avg,
                                    const InterpKernel *kernel,
                                    enum mv_precision precision,
                                    int x, int y, int bd);
#endif

static INLINE int scaled_buffer_offset(int x_offset, int y_offset, int stride,
                                       const struct scale_factors *sf) {
  const int x = sf ? sf->scale_value_x(x_offset, sf) : x_offset;

@@ -606,6 +606,33 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
  add_proto qw/void vp9_high_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bps";
  specialize qw/vp9_high_dc_128_predictor_32x32/;

  #
  # Sub Pixel Filters
  #
  add_proto qw/void vp9_high_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve_copy/;

  add_proto qw/void vp9_high_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve_avg/;

  add_proto qw/void vp9_high_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve8/, "$sse2_x86_64";

  add_proto qw/void vp9_high_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve8_horiz/, "$sse2_x86_64";

  add_proto qw/void vp9_high_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve8_vert/, "$sse2_x86_64";

  add_proto qw/void vp9_high_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve8_avg/, "$sse2_x86_64";

  add_proto qw/void vp9_high_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve8_avg_horiz/, "$sse2_x86_64";

  add_proto qw/void vp9_high_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
  specialize qw/vp9_high_convolve8_avg_vert/, "$sse2_x86_64";
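
Each add_proto line declares an RTCD entry and specialize names its per-ISA implementations; at build time rtcd.pl turns these into a generated header that binds each symbol to the best available version. A hedged sketch of the generated glue for one entry (the real header differs in detail):

typedef void (*high_convolve8_fn)(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const int16_t *filter_x, int x_step_q4,
                                  const int16_t *filter_y, int y_step_q4,
                                  int w, int h, int bps);
extern high_convolve8_fn vp9_high_convolve8;  /* runtime-selected */
/* setup_rtcd_internal() points this at vp9_high_convolve8_sse2 when the CPU
 * reports SSE2 on an x86-64 build, and at vp9_high_convolve8_c otherwise. */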

  #
  # dct
  #

@@ -43,9 +43,16 @@ MV32 vp9_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
  return res;
}

#if CONFIG_VP9_HIGHBITDEPTH
void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
                                       int other_w, int other_h,
                                       int this_w, int this_h,
                                       int use_high) {
#else
void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
                                       int other_w, int other_h,
                                       int this_w, int this_h) {
#endif
  if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
    sf->x_scale_fp = REF_INVALID_SCALE;
    sf->y_scale_fp = REF_INVALID_SCALE;
@@ -111,4 +118,48 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *sf,
  // 2D subpel motion always gets filtered in both directions
  sf->predict[1][1][0] = vp9_convolve8;
  sf->predict[1][1][1] = vp9_convolve8_avg;
#if CONFIG_VP9_HIGHBITDEPTH
  if (use_high) {
    if (sf->x_step_q4 == 16) {
      if (sf->y_step_q4 == 16) {
        // No scaling in either direction.
        sf->high_predict[0][0][0] = vp9_high_convolve_copy;
        sf->high_predict[0][0][1] = vp9_high_convolve_avg;
        sf->high_predict[0][1][0] = vp9_high_convolve8_vert;
        sf->high_predict[0][1][1] = vp9_high_convolve8_avg_vert;
        sf->high_predict[1][0][0] = vp9_high_convolve8_horiz;
        sf->high_predict[1][0][1] = vp9_high_convolve8_avg_horiz;
      } else {
        // No scaling in x direction. Must always scale in the y direction.
        sf->high_predict[0][0][0] = vp9_high_convolve8_vert;
        sf->high_predict[0][0][1] = vp9_high_convolve8_avg_vert;
        sf->high_predict[0][1][0] = vp9_high_convolve8_vert;
        sf->high_predict[0][1][1] = vp9_high_convolve8_avg_vert;
        sf->high_predict[1][0][0] = vp9_high_convolve8;
        sf->high_predict[1][0][1] = vp9_high_convolve8_avg;
      }
    } else {
      if (sf->y_step_q4 == 16) {
        // No scaling in the y direction. Must always scale in the x direction.
        sf->high_predict[0][0][0] = vp9_high_convolve8_horiz;
        sf->high_predict[0][0][1] = vp9_high_convolve8_avg_horiz;
        sf->high_predict[0][1][0] = vp9_high_convolve8;
        sf->high_predict[0][1][1] = vp9_high_convolve8_avg;
        sf->high_predict[1][0][0] = vp9_high_convolve8_horiz;
        sf->high_predict[1][0][1] = vp9_high_convolve8_avg_horiz;
      } else {
        // Must always scale in both directions.