Commit 39892cce authored by Paul Wilkins's avatar Paul Wilkins
Browse files

Code clean up.

References to MACROBLOCKD that use "x" changed to "xd"
to comply with convention elsewhere that x = MACROBLOCK
and xd = MACROBLOCKD.

Simplify some repeated references using local variables.

Change-Id: I0ba2e79536add08140a6c8b19698fcf5077246bc
parent 2f963917
......@@ -68,25 +68,27 @@ void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD
}
void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
if (x->mode_info_context->mbmi.mode != B_PRED &&
x->mode_info_context->mbmi.mode != I8X8_PRED &&
x->mode_info_context->mbmi.mode != SPLITMV) {
if (xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV) {
/* do 2nd order transform on the dc block */
IDCT_INVOKE(rtcd, iwalsh16)(&x->block[24].dqcoeff[0], x->block[24].diff);
recon_dcblock(x);
IDCT_INVOKE(rtcd, iwalsh16)(&blockd[24].dqcoeff[0], blockd[24].diff);
recon_dcblock(xd);
}
for (i = 0; i < 16; i++) {
vp8_inverse_transform_b(rtcd, &x->block[i], 32);
vp8_inverse_transform_b(rtcd, &blockd[i], 32);
}
for (i = 16; i < 24; i++) {
vp8_inverse_transform_b(rtcd, &x->block[i], 16);
vp8_inverse_transform_b(rtcd, &blockd[i], 16);
}
}
......@@ -102,53 +104,65 @@ void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *inpu
}
void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_inverse_transform_mby_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
// do 2nd order transform on the dc block
IDCT_INVOKE(rtcd, ihaar2)(x->block[24].dqcoeff, x->block[24].diff, 8);
IDCT_INVOKE(rtcd, ihaar2)(blockd[24].dqcoeff, blockd[24].diff, 8);
recon_dcblock_8x8(x); // need to change for 8x8
recon_dcblock_8x8(xd); // need to change for 8x8
for (i = 0; i < 9; i += 8) {
vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 32);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 32);
}
for (i = 2; i < 11; i += 8) {
vp8_inverse_transform_b_8x8(rtcd, &x->block[i + 2].dqcoeff[0], &x->block[i].diff[0], 32);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
&blockd[i].diff[0], 32);
}
}
void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_inverse_transform_mbuv_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
for (i = 16; i < 24; i += 4) {
vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
}
void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
if (x->mode_info_context->mbmi.mode != B_PRED &&
x->mode_info_context->mbmi.mode != SPLITMV) {
if (xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV) {
// do 2nd order transform on the dc block
IDCT_INVOKE(rtcd, ihaar2)(&x->block[24].dqcoeff[0], x->block[24].diff, 8);// dqcoeff[0]
recon_dcblock_8x8(x); // need to change for 8x8
IDCT_INVOKE(rtcd, ihaar2)(&blockd[24].dqcoeff[0],
blockd[24].diff, 8);// dqcoeff[0]
recon_dcblock_8x8(xd); // need to change for 8x8
}
for (i = 0; i < 9; i += 8) {
vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 32);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 32);
}
for (i = 2; i < 11; i += 8) {
vp8_inverse_transform_b_8x8(rtcd, &x->block[i + 2].dqcoeff[0], &x->block[i].diff[0], 32);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i + 2].dqcoeff[0],
&blockd[i].diff[0], 32);
}
for (i = 16; i < 24; i += 4) {
vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
}
......@@ -160,26 +174,36 @@ void vp8_inverse_transform_b_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
IDCT_INVOKE(rtcd, idct16x16)(input_dqcoeff, output_coeff, pitch);
}
void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
vp8_inverse_transform_b_16x16(rtcd, &x->block[0].dqcoeff[0], &x->block[0].diff[0], 32);
void vp8_inverse_transform_mby_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
vp8_inverse_transform_b_16x16(rtcd, &xd->block[0].dqcoeff[0],
&xd->block[0].diff[0], 32);
}
// U,V blocks are 8x8 per macroblock, so just run 8x8
void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_inverse_transform_mbuv_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
for (i = 16; i < 24; i += 4)
vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_inverse_transform_mb_16x16(const vp8_idct_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
BLOCKD *blockd = xd->block;
// Luma
vp8_inverse_transform_b_16x16(rtcd, &x->block[0].dqcoeff[0], &x->block[0].diff[0], 32);
vp8_inverse_transform_b_16x16(rtcd, &blockd[0].dqcoeff[0],
&blockd[0].diff[0], 32);
// U, V
// Chroma blocks are downscaled, so run an 8x8 on them.
for (i = 16; i < 24; i+= 4)
vp8_inverse_transform_b_8x8(rtcd, &x->block[i].dqcoeff[0], &x->block[i].diff[0], 16);
vp8_inverse_transform_b_8x8(rtcd, &blockd[i].dqcoeff[0],
&blockd[i].diff[0], 16);
}
#endif
......@@ -41,77 +41,84 @@ static void setup_block
}
static void setup_macroblock(MACROBLOCKD *x, BLOCKSET bs) {
static void setup_macroblock(MACROBLOCKD *xd, BLOCKSET bs) {
int block;
unsigned char **y, **u, **v;
unsigned char **y2, **u2, **v2;
BLOCKD *blockd = xd->block;
int stride;
if (bs == DEST) {
y = &x->dst.y_buffer;
u = &x->dst.u_buffer;
v = &x->dst.v_buffer;
y = &xd->dst.y_buffer;
u = &xd->dst.u_buffer;
v = &xd->dst.v_buffer;
} else {
y = &x->pre.y_buffer;
u = &x->pre.u_buffer;
v = &x->pre.v_buffer;
y = &xd->pre.y_buffer;
u = &xd->pre.u_buffer;
v = &xd->pre.v_buffer;
y2 = &x->second_pre.y_buffer;
u2 = &x->second_pre.u_buffer;
v2 = &x->second_pre.v_buffer;
y2 = &xd->second_pre.y_buffer;
u2 = &xd->second_pre.u_buffer;
v2 = &xd->second_pre.v_buffer;
}
stride = xd->dst.y_stride;
for (block = 0; block < 16; block++) { /* y blocks */
setup_block(&x->block[block], x->dst.y_stride, y, y2, x->dst.y_stride,
(block >> 2) * 4 * x->dst.y_stride + (block & 3) * 4, bs);
setup_block(&blockd[block], stride, y, y2, stride,
(block >> 2) * 4 * stride + (block & 3) * 4, bs);
}
stride = xd->dst.uv_stride;
for (block = 16; block < 20; block++) { /* U and V blocks */
setup_block(&x->block[block], x->dst.uv_stride, u, u2, x->dst.uv_stride,
((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
setup_block(&blockd[block], stride, u, u2, stride,
((block - 16) >> 1) * 4 * stride + (block & 1) * 4, bs);
setup_block(&x->block[block + 4], x->dst.uv_stride, v, v2, x->dst.uv_stride,
((block - 16) >> 1) * 4 * x->dst.uv_stride + (block & 1) * 4, bs);
setup_block(&blockd[block + 4], stride, v, v2, stride,
((block - 16) >> 1) * 4 * stride + (block & 1) * 4, bs);
}
}
void vp8_setup_block_dptrs(MACROBLOCKD *x) {
void vp8_setup_block_dptrs(MACROBLOCKD *xd) {
int r, c;
BLOCKD *blockd = xd->block;
for (r = 0; r < 4; r++) {
for (c = 0; c < 4; c++) {
x->block[r * 4 + c].diff = &x->diff[r * 4 * 16 + c * 4];
x->block[r * 4 + c].predictor = x->predictor + r * 4 * 16 + c * 4;
blockd[r * 4 + c].diff = &xd->diff[r * 4 * 16 + c * 4];
blockd[r * 4 + c].predictor = xd->predictor + r * 4 * 16 + c * 4;
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
x->block[16 + r * 2 + c].diff = &x->diff[256 + r * 4 * 8 + c * 4];
x->block[16 + r * 2 + c].predictor = x->predictor + 256 + r * 4 * 8 + c * 4;
blockd[16 + r * 2 + c].diff = &xd->diff[256 + r * 4 * 8 + c * 4];
blockd[16 + r * 2 + c].predictor =
xd->predictor + 256 + r * 4 * 8 + c * 4;
}
}
for (r = 0; r < 2; r++) {
for (c = 0; c < 2; c++) {
x->block[20 + r * 2 + c].diff = &x->diff[320 + r * 4 * 8 + c * 4];
x->block[20 + r * 2 + c].predictor = x->predictor + 320 + r * 4 * 8 + c * 4;
blockd[20 + r * 2 + c].diff = &xd->diff[320 + r * 4 * 8 + c * 4];
blockd[20 + r * 2 + c].predictor =
xd->predictor + 320 + r * 4 * 8 + c * 4;
}
}
x->block[24].diff = &x->diff[384];
blockd[24].diff = &xd->diff[384];
for (r = 0; r < 25; r++) {
x->block[r].qcoeff = x->qcoeff + r * 16;
x->block[r].dqcoeff = x->dqcoeff + r * 16;
blockd[r].qcoeff = xd->qcoeff + r * 16;
blockd[r].dqcoeff = xd->dqcoeff + r * 16;
}
}
void vp8_build_block_doffsets(MACROBLOCKD *x) {
void vp8_build_block_doffsets(MACROBLOCKD *xd) {
/* handle the destination pitch features */
setup_macroblock(x, DEST);
setup_macroblock(x, PRED);
setup_macroblock(xd, DEST);
setup_macroblock(xd, PRED);
}
This diff is collapsed.
......@@ -196,24 +196,28 @@ void d153_predictor(unsigned char *ypred_ptr, int y_stride, int n,
}
}
void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x) {
void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
MACROBLOCKD *xd) {
int i;
for (i = 16; i < 24; i += 2) {
BLOCKD *b = &x->block[i];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
BLOCKD *b = &xd->block[i];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff,
*(b->base_dst) + b->dst, b->dst_stride);
}
}
void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *x, unsigned char *ypred_ptr, int y_stride, int mode) {
void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
unsigned char *ypred_ptr,
int y_stride, int mode) {
unsigned char *yabove_row = x->dst.y_buffer - x->dst.y_stride;
unsigned char *yabove_row = xd->dst.y_buffer - xd->dst.y_stride;
unsigned char yleft_col[16];
unsigned char ytop_left = yabove_row[-1];
int r, c, i;
for (i = 0; i < 16; i++) {
yleft_col[i] = x->dst.y_buffer [i * x->dst.y_stride - 1];
yleft_col[i] = xd->dst.y_buffer [i * xd->dst.y_stride - 1];
}
/* for Y */
......@@ -225,19 +229,19 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *x, unsigned char *ypre
int average = 0;
if (x->up_available || x->left_available) {
if (x->up_available) {
if (xd->up_available || xd->left_available) {
if (xd->up_available) {
for (i = 0; i < 16; i++) {
average += yabove_row[i];
}
}
if (x->left_available) {
if (xd->left_available) {
for (i = 0; i < 16; i++) {
average += yleft_col[i];
}
}
shift = 3 + x->up_available + x->left_available;
shift = 3 + xd->up_available + xd->left_available;
expected_dc = (average + (1 << (shift - 1))) >> shift;
} else {
expected_dc = 128;
......@@ -329,49 +333,51 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *x, unsigned char *ypre
}
}
void vp8_build_intra_predictors_mby(MACROBLOCKD *x) {
vp8_build_intra_predictors_mby_internal(x, x->predictor, 16,
x->mode_info_context->mbmi.mode);
void vp8_build_intra_predictors_mby(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mby_internal(xd, xd->predictor, 16,
xd->mode_info_context->mbmi.mode);
}
void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) {
vp8_build_intra_predictors_mby_internal(x, x->dst.y_buffer, x->dst.y_stride,
x->mode_info_context->mbmi.mode);
void vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mby_internal(xd, xd->dst.y_buffer,
xd->dst.y_stride,
xd->mode_info_context->mbmi.mode);
}
#if CONFIG_COMP_INTRA_PRED
void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *x) {
void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
unsigned char predictor[2][256];
int i;
vp8_build_intra_predictors_mby_internal(x, predictor[0], 16,
x->mode_info_context->mbmi.mode);
vp8_build_intra_predictors_mby_internal(x, predictor[1], 16,
x->mode_info_context->mbmi.second_mode);
vp8_build_intra_predictors_mby_internal(
xd, predictor[0], 16, xd->mode_info_context->mbmi.mode);
vp8_build_intra_predictors_mby_internal(
xd, predictor[1], 16, xd->mode_info_context->mbmi.second_mode);
for (i = 0; i < 256; i++) {
x->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1;
xd->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1;
}
}
#endif
void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *x,
void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd,
unsigned char *upred_ptr,
unsigned char *vpred_ptr,
int uv_stride,
int mode) {
unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
YV12_BUFFER_CONFIG * dst = &xd->dst;
unsigned char *uabove_row = dst->u_buffer - dst->uv_stride;
unsigned char uleft_col[16];
unsigned char utop_left = uabove_row[-1];
unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
unsigned char *vabove_row = dst->v_buffer - dst->uv_stride;
unsigned char vleft_col[20];
unsigned char vtop_left = vabove_row[-1];
int i, j;
for (i = 0; i < 8; i++) {
uleft_col[i] = x->dst.u_buffer [i * x->dst.uv_stride - 1];
vleft_col[i] = x->dst.v_buffer [i * x->dst.uv_stride - 1];
uleft_col[i] = dst->u_buffer [i * dst->uv_stride - 1];
vleft_col[i] = dst->v_buffer [i * dst->uv_stride - 1];
}
switch (mode) {
......@@ -383,25 +389,25 @@ void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *x,
int Uaverage = 0;
int Vaverage = 0;
if (x->up_available) {
if (xd->up_available) {
for (i = 0; i < 8; i++) {
Uaverage += uabove_row[i];
Vaverage += vabove_row[i];
}
}
if (x->left_available) {
if (xd->left_available) {
for (i = 0; i < 8; i++) {
Uaverage += uleft_col[i];
Vaverage += vleft_col[i];
}
}
if (!x->up_available && !x->left_available) {
if (!xd->up_available && !xd->left_available) {
expected_udc = 128;
expected_vdc = 128;
} else {
shift = 2 + x->up_available + x->left_available;
shift = 2 + xd->up_available + xd->left_available;
expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
}
......@@ -512,49 +518,47 @@ void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *x,
}
}
void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x) {
vp8_build_intra_predictors_mbuv_internal(x,
&x->predictor[256],
&x->predictor[320],
8,
x->mode_info_context->mbmi.uv_mode);
void vp8_build_intra_predictors_mbuv(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mbuv_internal(
xd, &xd->predictor[256], &xd->predictor[320],
8, xd->mode_info_context->mbmi.uv_mode);
}
void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x) {
vp8_build_intra_predictors_mbuv_internal(x,
x->dst.u_buffer,
x->dst.v_buffer,
x->dst.uv_stride,
x->mode_info_context->mbmi.uv_mode);
void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mbuv_internal(
xd, xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->mode_info_context->mbmi.uv_mode);
}
#if CONFIG_COMP_INTRA_PRED
void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *x) {
void vp8_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
unsigned char predictor[2][2][64];
int i;
vp8_build_intra_predictors_mbuv_internal(x, predictor[0][0], predictor[1][0], 8,
x->mode_info_context->mbmi.uv_mode);
vp8_build_intra_predictors_mbuv_internal(x, predictor[0][1], predictor[1][1], 8,
x->mode_info_context->mbmi.second_uv_mode);
vp8_build_intra_predictors_mbuv_internal(
xd, predictor[0][0], predictor[1][0], 8,
xd->mode_info_context->mbmi.uv_mode);
vp8_build_intra_predictors_mbuv_internal(
xd, predictor[0][1], predictor[1][1], 8,
xd->mode_info_context->mbmi.second_uv_mode);
for (i = 0; i < 64; i++) {
x->predictor[256 + i] = (predictor[0][0][i] + predictor[0][1][i] + 1) >> 1;
x->predictor[256 + 64 + i] = (predictor[1][0][i] + predictor[1][1][i] + 1) >> 1;
xd->predictor[256 + i] = (predictor[0][0][i] + predictor[0][1][i] + 1) >> 1;
xd->predictor[256 + 64 + i] = (predictor[1][0][i] + predictor[1][1][i] + 1) >> 1;
}
}
#endif
void vp8_intra8x8_predict(BLOCKD *x,
void vp8_intra8x8_predict(BLOCKD *xd,
int mode,
unsigned char *predictor) {
unsigned char *yabove_row = *(x->base_dst) + x->dst - x->dst_stride;
unsigned char *yabove_row = *(xd->base_dst) + xd->dst - xd->dst_stride;
unsigned char yleft_col[8];
unsigned char ytop_left = yabove_row[-1];
int r, c, i;
for (i = 0; i < 8; i++) {
yleft_col[i] = (*(x->base_dst))[x->dst - 1 + i * x->dst_stride];
yleft_col[i] = (*(xd->base_dst))[xd->dst - 1 + i * xd->dst_stride];
}
switch (mode) {
case DC_PRED: {
......@@ -639,14 +643,14 @@ void vp8_intra8x8_predict(BLOCKD *x,
}
#if CONFIG_COMP_INTRA_PRED
void vp8_comp_intra8x8_predict(BLOCKD *x,
void vp8_comp_intra8x8_predict(BLOCKD *xd,
int mode, int second_mode,
unsigned char *out_predictor) {
unsigned char predictor[2][8 * 16];
int i, j;
vp8_intra8x8_predict(x, mode, predictor[0]);
vp8_intra8x8_predict(x, second_mode, predictor[1]);
vp8_intra8x8_predict(xd, mode, predictor[0]);
vp8_intra8x8_predict(xd, second_mode, predictor[1]);
for (i = 0; i < 8 * 16; i += 16) {
for (j = i; j < i + 8; j++) {
......@@ -656,17 +660,17 @@ void vp8_comp_intra8x8_predict(BLOCKD *x,
}
#endif
void vp8_intra_uv4x4_predict(BLOCKD *x,
void vp8_intra_uv4x4_predict(BLOCKD *xd,
int mode,
unsigned char *predictor) {
unsigned char *above_row = *(x->base_dst) + x->dst - x->dst_stride;
unsigned char *above_row = *(xd->base_dst) + xd->dst - xd->dst_stride;
unsigned char left_col[4];
unsigned char top_left = above_row[-1];
int r, c, i;
for (i = 0; i < 4; i++) {
left_col[i] = (*(x->base_dst))[x->dst - 1 + i * x->dst_stride];
left_col[i] = (*(xd->base_dst))[xd->dst - 1 + i * xd->dst_stride];
}
switch (mode) {
case DC_PRED: {
......@@ -752,14 +756,14 @@ void vp8_intra_uv4x4_predict(BLOCKD *x,
}
#if CONFIG_COMP_INTRA_PRED
void vp8_comp_intra_uv4x4_predict(BLOCKD *x,
void vp8_comp_intra_uv4x4_predict(BLOCKD *xd,
int mode, int mode2,
unsigned char *out_predictor) {
unsigned char predictor[2][8 * 4];
int i, j;
vp8_intra_uv4x4_predict(x, mode, predictor[0]);
vp8_intra_uv4x4_predict(x, mode2, predictor[1]);
vp8_intra_uv4x4_predict(xd, mode, predictor[0]);
vp8_intra_uv4x4_predict(xd, mode2, predictor[1]);
for (i = 0; i < 4 * 8; i += 8) {
for (j = i; j < i + 4; j++) {
......
......@@ -100,20 +100,20 @@ DECLARE_ALIGNED(16, const int, coef_bands_x_16x16[256]) = {
static const unsigned char cat6_prob[14] =
{ 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0 };
void vp8_reset_mb_tokens_context(MACROBLOCKD *x) {
void vp8_reset_mb_tokens_context(MACROBLOCKD *xd) {
/* Clear entropy contexts for Y2 blocks */
if ((x->mode_info_context->mbmi.mode != B_PRED &&
x->mode_info_context->mbmi.mode != I8X8_PRED &&
x->mode_info_context->mbmi.mode != SPLITMV)
if ((xd->mode_info_context->mbmi.mode != B_PRED &&
xd->mode_info_context->mbmi.mode != I8X8_PRED &&
xd->mode_info_context->mbmi.mode != SPLITMV)
#if CONFIG_TX16X16
|| x->mode_info_context->mbmi.txfm_size == TX_16X16
#endif
) {
vpx_memset(x->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(x->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));