Commit 6f43ff58 authored by Jingning Han

Make the use of pred buffers consistent in MB/SB

Use in-place buffers (dst of MACROBLOCKD) for macroblock prediction.
This makes the macroblock buffer handling consistent with that of
superblocks. Remove the predictor buffer from MACROBLOCKD.
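
For illustration, a minimal standalone sketch of the in-place pattern
(the recon4x4 helper below is hypothetical and only mirrors the recon()
calls touched by this change, where the prediction stride becomes the
dst stride):

#include <stdint.h>

/* Hypothetical sketch: with in-place buffers the prediction already sits
 * in the frame's dst buffer, so pred and dst may alias the same memory and
 * share dst_stride. */
static void recon4x4(const uint8_t *pred, int pred_stride,
                     const int16_t *diff, int diff_stride,
                     uint8_t *dst, int dst_stride) {
  int r, c;
  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      const int v = pred[c] + diff[c];
      dst[c] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);  /* clip to [0, 255] */
    }
    pred += pred_stride;
    diff += diff_stride;
    dst += dst_stride;
  }
}

/* In-place use: the same pointer and stride are passed for the prediction
 * and the output, e.g. recon4x4(dst, dst_stride, diff, 16, dst, dst_stride).
 * The old MB path instead predicted into xd->predictor (stride 16) and then
 * reconstructed from there into dst. */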

Change-Id: Id1bcd898961097b1e6230c10f0130753a59fc6df
parent 38f62321
......@@ -290,7 +290,6 @@ typedef struct {
} MODE_INFO;
typedef struct blockd {
uint8_t *predictor;
int16_t *diff;
int16_t *dequant;
......@@ -354,7 +353,6 @@ struct mb_plane {
typedef struct macroblockd {
DECLARE_ALIGNED(16, int16_t, diff[64*64+32*32*2]); /* from idct diff */
DECLARE_ALIGNED(16, uint8_t, predictor[384]); // unused for superblocks
#if CONFIG_CODE_NONZEROCOUNT
DECLARE_ALIGNED(16, uint16_t, nzcs[256+64*2]);
#endif
......
......@@ -78,7 +78,6 @@ void vp9_setup_block_dptrs(MACROBLOCKD *mb) {
const int to = r * 4 + c;
const int from = r * 4 * 16 + c * 4;
blockd[to].diff = &mb->diff[from];
blockd[to].predictor = &mb->predictor[from];
}
}
......@@ -87,7 +86,6 @@ void vp9_setup_block_dptrs(MACROBLOCKD *mb) {
const int to = 16 + r * 2 + c;
const int from = 256 + r * 4 * 8 + c * 4;
blockd[to].diff = &mb->diff[from];
blockd[to].predictor = &mb->predictor[from];
}
}
......@@ -96,7 +94,6 @@ void vp9_setup_block_dptrs(MACROBLOCKD *mb) {
const int to = 20 + r * 2 + c;
const int from = 320 + r * 4 * 8 + c * 4;
blockd[to].diff = &mb->diff[from];
blockd[to].predictor = &mb->predictor[from];
}
}
......
......@@ -32,22 +32,22 @@ static INLINE void recon(int rows, int cols,
void vp9_recon_b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
recon(4, 4, pred_ptr, 16, diff_ptr, 16, dst_ptr, stride);
recon(4, 4, pred_ptr, stride, diff_ptr, 16, dst_ptr, stride);
}
void vp9_recon_uv_b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
recon(4, 4, pred_ptr, 8, diff_ptr, 8, dst_ptr, stride);
recon(4, 4, pred_ptr, stride, diff_ptr, 8, dst_ptr, stride);
}
void vp9_recon4b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
recon(4, 16, pred_ptr, 16, diff_ptr, 16, dst_ptr, stride);
recon(4, 16, pred_ptr, stride, diff_ptr, 16, dst_ptr, stride);
}
void vp9_recon2b_c(uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr,
int stride) {
recon(4, 8, pred_ptr, 8, diff_ptr, 8, dst_ptr, stride);
recon(4, 8, pred_ptr, stride, diff_ptr, 8, dst_ptr, stride);
}
void vp9_recon_sby_s_c(MACROBLOCKD *mb, uint8_t *dst,
......@@ -95,7 +95,8 @@ void vp9_recon_mby_c(MACROBLOCKD *xd) {
for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];
vp9_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon4b(*(b->base_dst) + b->dst, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
}
}
......@@ -104,13 +105,13 @@ void vp9_recon_mb_c(MACROBLOCKD *xd) {
for (i = 0; i < 16; i += 4) {
BLOCKD *b = &xd->block[i];
vp9_recon4b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon4b(*(b->base_dst) + b->dst, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
}
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
vp9_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon2b(*(b->base_dst) + b->dst, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
}
}
......@@ -399,7 +399,7 @@ static void build_2x1_inter_predictor_wh(const BLOCKD *d0, const BLOCKD *d1,
int row, int col) {
struct scale_factors * scale = &s[which_mv];
assert(d1->predictor - d0->predictor == block_size);
assert(d1->dst - d0->dst == block_size);
assert(d1->pre == d0->pre + block_size);
scale->set_scaled_offsets(scale, row, col);
......@@ -446,11 +446,11 @@ static void build_2x1_inter_predictor(const BLOCKD *d0, const BLOCKD *d1,
int block_size, int stride,
int which_mv, int weight,
const struct subpix_fn_table *subpix,
int row, int col, int use_dst) {
uint8_t *d0_predictor = use_dst ? *(d0->base_dst) + d0->dst : d0->predictor;
uint8_t *d1_predictor = use_dst ? *(d1->base_dst) + d1->dst : d1->predictor;
int row, int col) {
uint8_t *d0_predictor = *(d0->base_dst) + d0->dst;
uint8_t *d1_predictor = *(d1->base_dst) + d1->dst;
struct scale_factors * scale = &s[which_mv];
stride = use_dst ? d0->dst_stride : stride;
stride = d0->dst_stride;
assert(d1_predictor - d0_predictor == block_size);
assert(d1->pre == d0->pre + block_size);
......@@ -1338,8 +1338,7 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
}
static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
int mb_row, int mb_col,
int use_dst) {
int mb_row, int mb_col) {
int i;
MB_MODE_INFO * mbmi = &xd->mode_info_context->mbmi;
BLOCKD *blockd = xd->block;
......@@ -1368,8 +1367,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
build_2x1_inter_predictor(d0, d1, xd->scale_factor, 8, 16, which_mv,
which_mv ? weight : 0,
&xd->subpix, mb_row * 16 + y, mb_col * 16,
use_dst);
&xd->subpix, mb_row * 16 + y, mb_col * 16);
}
}
} else {
......@@ -1386,8 +1384,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
build_2x1_inter_predictor(d0, d1, xd->scale_factor, 4, 16, which_mv,
which_mv ? weight : 0,
&xd->subpix,
mb_row * 16 + y, mb_col * 16 + x,
use_dst);
mb_row * 16 + y, mb_col * 16 + x);
}
}
}
......@@ -1405,8 +1402,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd,
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
which_mv ? weight : 0, &xd->subpix,
mb_row * 8 + y, mb_col * 8 + x,
use_dst);
mb_row * 8 + y, mb_col * 8 + x);
}
}
}
......@@ -1493,58 +1489,17 @@ static void build_4x4uvmvs(MACROBLOCKD *xd) {
}
}
void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
uint8_t *dst_y,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
int dst_uvstride,
int mb_row,
int mb_col) {
vp9_build_inter16x16_predictors_mby(xd, dst_y, dst_ystride, mb_row, mb_col);
vp9_build_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride,
mb_row, mb_col);
#if CONFIG_COMP_INTERINTRA_PRED
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_16x16_predictors_mb(xd, dst_y, dst_u, dst_v,
dst_ystride, dst_uvstride);
}
#endif
}
void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
int mb_row,
int mb_col) {
if (xd->mode_info_context->mbmi.mode != SPLITMV) {
// TODO(jingning): to be replaced with vp9_build_inter_predictors_sb() when
// converting buffers from predictors to dst.
vp9_build_inter16x16_predictors_mb(xd, xd->predictor,
&xd->predictor[256],
&xd->predictor[320], 16, 8,
mb_row, mb_col);
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
} else {
build_4x4uvmvs(xd);
build_inter4x4_predictors_mb(xd, mb_row, mb_col, 0);
build_inter4x4_predictors_mb(xd, mb_row, mb_col);
}
}
void vp9_build_inter_predictors_mb_s(MACROBLOCKD *xd,
int mb_row,
int mb_col) {
if (xd->mode_info_context->mbmi.mode != SPLITMV) {
vp9_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride,
mb_row, mb_col);
} else {
build_4x4uvmvs(xd);
build_inter4x4_predictors_mb(xd, mb_row, mb_col, 1);
}
}
/*encoder only*/
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
int mb_row, int mb_col) {
......@@ -1593,8 +1548,7 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
for (which_mv = 0; which_mv < 1 + use_second_ref; ++which_mv) {
build_2x1_inter_predictor(d0, d1, xd->scale_factor_uv, 4, 8, which_mv,
which_mv ? weight : 0,
&xd->subpix, mb_row * 8 + y, mb_col * 8 + x,
0);
&xd->subpix, mb_row * 8 + y, mb_col * 8 + x);
}
}
}
......@@ -29,14 +29,20 @@ void vp9_build_inter16x16_predictors_mbuv(MACROBLOCKD *xd,
int mb_row,
int mb_col);
void vp9_build_inter16x16_predictors_mb(MACROBLOCKD *xd,
uint8_t *dst_y,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_ystride,
int dst_uvstride,
int mb_row,
int mb_col);
void vp9_build_inter_predictors_sby(MACROBLOCKD *x,
uint8_t *dst_y,
int dst_ystride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize);
void vp9_build_inter_predictors_sbuv(MACROBLOCKD *x,
uint8_t *dst_u,
uint8_t *dst_v,
int dst_uvstride,
int mb_row,
int mb_col,
BLOCK_SIZE_TYPE bsize);
void vp9_build_inter_predictors_sb(MACROBLOCKD *mb,
int mb_row, int mb_col,
......@@ -46,10 +52,6 @@ void vp9_build_inter_predictors_mb(MACROBLOCKD *xd,
int mb_row,
int mb_col);
void vp9_build_inter_predictors_mb_s(MACROBLOCKD *xd,
int mb_row,
int mb_col);
void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd,
int mb_row,
int mb_col);
......
......@@ -273,7 +273,8 @@ void vp9_recon_intra_mbuv(MACROBLOCKD *xd) {
int i;
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &xd->block[i];
vp9_recon2b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon2b(*(b->base_dst) + b->dst, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
}
}
......@@ -758,40 +759,6 @@ void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd,
xd->left_available, xd->right_available);
}
// TODO(jingning): merge mby and mbuv into the above sby and sbuv functions
void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) {
vp9_build_intra_predictors(xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16,
xd->mode_info_context->mbmi.mode,
16, 16,
xd->up_available, xd->left_available,
xd->right_available);
}
void vp9_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd,
uint8_t *upred_ptr,
uint8_t *vpred_ptr,
int uv_stride,
int mode, int bsize) {
vp9_build_intra_predictors(xd->dst.u_buffer, xd->dst.uv_stride,
upred_ptr, uv_stride, mode,
bsize, bsize,
xd->up_available, xd->left_available,
xd->right_available);
vp9_build_intra_predictors(xd->dst.v_buffer, xd->dst.uv_stride,
vpred_ptr, uv_stride, mode,
bsize, bsize,
xd->up_available, xd->left_available,
xd->right_available);
}
void vp9_build_intra_predictors_mbuv(MACROBLOCKD *xd) {
vp9_build_intra_predictors_mbuv_internal(xd, &xd->predictor[256],
&xd->predictor[320], 8,
xd->mode_info_context->mbmi.uv_mode,
8);
}
void vp9_intra8x8_predict(MACROBLOCKD *xd,
BLOCKD *b,
int mode,
......
......@@ -68,11 +68,15 @@ specialize vp9_recon_b
prototype void vp9_recon_uv_b "uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr, int stride"
specialize vp9_recon_uv_b
# TODO(jingning): The prototype functions in c are modified to enable block-size configurable
# operations. Need to change the sse2 accordingly.
prototype void vp9_recon2b "uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr, int stride"
specialize vp9_recon2b sse2
specialize vp9_recon2b
# specialize vp9_recon2b sse2
prototype void vp9_recon4b "uint8_t *pred_ptr, int16_t *diff_ptr, uint8_t *dst_ptr, int stride"
specialize vp9_recon4b sse2
specialize vp9_recon4b
# specialize vp9_recon4b sse2
prototype void vp9_recon_mb "struct macroblockd *x"
specialize vp9_recon_mb
......@@ -86,17 +90,14 @@ specialize vp9_recon_sby_s
prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst, enum BLOCK_SIZE_TYPE bsize"
specialize void vp9_recon_sbuv_s
prototype void vp9_build_intra_predictors "uint8_t *src, int src_stride, uint8_t *pred, int y_stride, int mode, int bw, int bh, int up_available, int left_available, int right_available"
specialize void vp9_build_intra_predictors
prototype void vp9_build_intra_predictors_sby_s "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
specialize vp9_build_intra_predictors_sby_s;
specialize vp9_build_intra_predictors_sby_s
prototype void vp9_build_intra_predictors_sbuv_s "struct macroblockd *x, enum BLOCK_SIZE_TYPE bsize"
specialize vp9_build_intra_predictors_sbuv_s;
prototype void vp9_build_intra_predictors_mby "struct macroblockd *x"
specialize vp9_build_intra_predictors_mby;
prototype void vp9_build_intra_predictors_mbuv "struct macroblockd *x"
specialize vp9_build_intra_predictors_mbuv;
specialize vp9_build_intra_predictors_sbuv_s
prototype void vp9_intra4x4_predict "struct macroblockd *xd, struct blockd *x, int b_mode, uint8_t *predictor, int pre_stride"
specialize vp9_intra4x4_predict;
......@@ -620,16 +621,10 @@ specialize vp9_block_error mmx sse2
vp9_block_error_sse2=vp9_block_error_xmm
prototype void vp9_subtract_b "struct block *be, struct blockd *bd, int pitch"
specialize vp9_subtract_b mmx sse2
prototype void vp9_subtract_b "struct block *be, struct blockd *bd, int pitch"
specialize vp9_subtract_b mmx sse2
prototype void vp9_subtract_mby "int16_t *diff, uint8_t *src, uint8_t *pred, int stride"
specialize vp9_subtract_mby mmx sse2
prototype void vp9_subtract_mbuv "int16_t *diff, uint8_t *usrc, uint8_t *vsrc, uint8_t *pred, int stride"
specialize vp9_subtract_mbuv mmx sse2
# TODO(jingning): The prototype function in c has been changed to remove
# the use of predictor buffer in MACROBLOCKD. Need to modify the mmx and sse2
# versions accordingly.
specialize vp9_subtract_b
#
# Structured Similarity (SSIM)
......
......@@ -73,15 +73,15 @@ static void build_intra_predictors_mbuv_x86(MACROBLOCKD *xd,
}
void vp9_build_intra_predictors_mbuv_sse2(MACROBLOCKD *xd) {
build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
&xd->predictor[320], 8,
build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
xd->dst.v_buffer, xd->dst.uv_stride,
vp9_intra_pred_uv_tm_sse2,
vp9_intra_pred_uv_ho_mmx2);
}
void vp9_build_intra_predictors_mbuv_ssse3(MACROBLOCKD *xd) {
build_intra_predictors_mbuv_x86(xd, &xd->predictor[256],
&xd->predictor[320], 8,
build_intra_predictors_mbuv_x86(xd, xd->dst.u_buffer,
xd->dst.v_buffer, xd->dst.uv_stride,
vp9_intra_pred_uv_tm_ssse3,
vp9_intra_pred_uv_ho_ssse3);
}
......
......@@ -645,7 +645,7 @@ static void decode_mb(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->mode_info_context->mbmi.mode, tx_size,
xd->mode_info_context->mbmi.interp_filter);
#endif
vp9_build_inter_predictors_mb_s(xd, mb_row, mb_col);
vp9_build_inter_predictors_mb(xd, mb_row, mb_col);
}
if (xd->mode_info_context->mbmi.mb_skip_coeff) {
......
......@@ -1405,9 +1405,9 @@ static void encode_frame_internal(VP9_COMP *cpi) {
MACROBLOCKD *const xd = &x->e_mbd;
int totalrate;
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
// fprintf(stderr, "encode_frame_internal frame %d (%d) type %d\n",
// cpi->common.current_video_frame, cpi->common.show_frame,
// cm->frame_type);
// Compute a modified set of reference frame probabilities to use when
// prediction fails. These are based on the current general estimates for
......@@ -2230,15 +2230,8 @@ static void encode_macroblock(VP9_COMP *cpi, TOKENEXTRA **t,
if (!x->skip) {
vp9_encode_inter16x16(cm, x, mb_row, mb_col);
} else {
vp9_build_inter16x16_predictors_mb(xd,
xd->dst.y_buffer,
xd->dst.u_buffer,
xd->dst.v_buffer,
xd->dst.y_stride,
xd->dst.uv_stride,
mb_row, mb_col);
vp9_build_inter_predictors_sb(xd, mb_row, mb_col, BLOCK_SIZE_MB16X16);
#if CONFIG_COMP_INTERINTRA_PRED
if (xd->mode_info_context->mbmi.second_ref_frame == INTRA_FRAME) {
vp9_build_interintra_16x16_predictors_mb(xd,
......
......@@ -52,7 +52,8 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) {
b->bmi.as_mode.context = vp9_find_bpred_context(&x->e_mbd, b);
#endif
vp9_intra4x4_predict(&x->e_mbd, b, b->bmi.as_mode.first, b->predictor, 16);
vp9_intra4x4_predict(&x->e_mbd, b, b->bmi.as_mode.first,
*(b->base_dst) + b->dst, b->dst_stride);
vp9_subtract_b(be, b, 16);
tx_type = get_tx_type_4x4(&x->e_mbd, ib);
......@@ -69,7 +70,8 @@ static void encode_intra4x4block(MACROBLOCK *x, int ib) {
b->diff, 32);
}
vp9_recon_b(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
vp9_recon_b(*(b->base_dst) + b->dst, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
}
void vp9_encode_intra4x4mby(MACROBLOCK *mb) {
......@@ -81,12 +83,13 @@ void vp9_encode_intra4x4mby(MACROBLOCK *mb) {
void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
BLOCK *b = &x->block[0];
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
vp9_build_intra_predictors_mby(xd);
vp9_subtract_mby(x->src_diff, *(b->base_src), xd->predictor, b->src_stride);
vp9_build_intra_predictors_sby_s(xd, BLOCK_SIZE_MB16X16);
vp9_subtract_sby_s_c(x->src_diff,
x->src.y_buffer, x->src.y_stride,
xd->dst.y_buffer, xd->dst.y_stride,
BLOCK_SIZE_MB16X16);
switch (tx_size) {
case TX_16X16:
......@@ -119,10 +122,11 @@ void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
vp9_build_intra_predictors_mbuv(xd);
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
xd->predictor, x->src.uv_stride);
vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
vp9_subtract_sbuv_s_c(x->src_diff,
x->src.u_buffer, x->src.v_buffer, x->src.uv_stride,
xd->dst.u_buffer, xd->dst.v_buffer, xd->dst.uv_stride,
BLOCK_SIZE_MB16X16);
switch (tx_size) {
case TX_4X4:
......@@ -152,7 +156,8 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
int i;
TX_TYPE tx_type;
vp9_intra8x8_predict(xd, b, b->bmi.as_mode.first, b->predictor, 16);
vp9_intra8x8_predict(xd, b, b->bmi.as_mode.first,
*(b->base_dst) + b->dst, b->dst_stride);
// generate residual blocks
vp9_subtract_4b_c(be, b, 16);
......@@ -206,7 +211,7 @@ void vp9_encode_intra8x8(MACROBLOCK *x, int ib) {
// reconstruct submacroblock
for (i = 0; i < 4; i++) {
b = &xd->block[ib + iblock[i]];
vp9_recon_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
vp9_recon_b_c(*(b->base_dst) + b->dst, b->diff, *(b->base_dst) + b->dst,
b->dst_stride);
}
}
......@@ -227,7 +232,8 @@ static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) {
const int block = ib < 20 ? ib - 16 : ib - 20;
assert(ib >= 16 && ib < 24);
vp9_intra_uv4x4_predict(&x->e_mbd, b, mode, b->predictor, 8);
vp9_intra_uv4x4_predict(&x->e_mbd, b, mode,
*(b->base_dst) + b->dst, b->dst_stride);
vp9_subtract_b(be, b, 8);
......@@ -236,7 +242,7 @@ static void encode_intra_uv4x4(MACROBLOCK *x, int ib, int mode) {
vp9_inverse_transform_b_4x4(&x->e_mbd, xd->plane[plane].eobs[block],
dqcoeff, b->diff, 16);
vp9_recon_uv_b_c(b->predictor, b->diff, *(b->base_dst) + b->dst,
vp9_recon_uv_b_c(*(b->base_dst) + b->dst, b->diff, *(b->base_dst) + b->dst,
b->dst_stride);
}
......
......@@ -23,8 +23,9 @@
void vp9_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
uint8_t *src_ptr = (*(be->base_src) + be->src);
int16_t *diff_ptr = be->src_diff;
uint8_t *pred_ptr = bd->predictor;
uint8_t *pred_ptr = *(bd->base_dst) + bd->dst;
int src_stride = be->src_stride;
int dst_stride = bd->dst_stride;
int r, c;
......@@ -33,7 +34,7 @@ void vp9_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
diff_ptr[c] = src_ptr[c] - pred_ptr[c];
diff_ptr += pitch;
pred_ptr += pitch;
pred_ptr += dst_stride;
src_ptr += src_stride;
}
}
......@@ -41,8 +42,9 @@ void vp9_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch) {
void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
uint8_t *src_ptr = (*(be->base_src) + be->src);
int16_t *diff_ptr = be->src_diff;
uint8_t *pred_ptr = bd->predictor;
uint8_t *pred_ptr = *(bd->base_dst) + bd->dst;
int src_stride = be->src_stride;
int dst_stride = bd->dst_stride;
int r, c;
for (r = 0; r < 8; r++) {
......@@ -50,7 +52,7 @@ void vp9_subtract_4b_c(BLOCK *be, BLOCKD *bd, int pitch) {
diff_ptr[c] = src_ptr[c] - pred_ptr[c];
diff_ptr += pitch;
pred_ptr += pitch;
pred_ptr += dst_stride;
src_ptr += src_stride;
}
}
......@@ -102,25 +104,15 @@ void vp9_subtract_sbuv_s_c(int16_t *diff, const uint8_t *usrc,
}
}
void vp9_subtract_mby_c(int16_t *diff, uint8_t *src,
uint8_t *pred, int stride) {
vp9_subtract_sby_s_c(diff, src, stride, pred, 16, BLOCK_SIZE_MB16X16);
}
void vp9_subtract_mbuv_c(int16_t *diff, uint8_t *usrc,
uint8_t *vsrc, uint8_t *pred, int stride) {
uint8_t *upred = pred + 256;
uint8_t *vpred = pred + 320;
vp9_subtract_sbuv_s_c(diff, usrc, vsrc, stride, upred, vpred, 8,
BLOCK_SIZE_MB16X16);
}
static void subtract_mb(MACROBLOCK *x) {
vp9_subtract_mby(x->src_diff, x->src.y_buffer, x->e_mbd.predictor,
x->src.y_stride);
vp9_subtract_mbuv(x->src_diff, x->src.u_buffer, x->src.v_buffer,
x->e_mbd.predictor, x->src.uv_stride);
MACROBLOCKD *xd = &x->e_mbd;
vp9_subtract_sby_s_c(x->src_diff, x->src.y_buffer, x->src.y_stride,
xd->dst.y_buffer, xd->dst.y_stride,
BLOCK_SIZE_MB16X16);
vp9_subtract_sbuv_s_c(x->src_diff, x->src.u_buffer, x->src.v_buffer,