Commit 9ed87471 authored by Scott LaVarnway
Browse files

threading.c refactoring

Added recon above/left to MACROBLOCKD
Reworked decode_macroblock

Change-Id: I9c26870af75797134f410acbd02942065b3495c1
parent 2578b767
......@@ -220,6 +220,10 @@ typedef struct macroblockd
int up_available;
int left_available;
unsigned char *recon_above[3];
unsigned char *recon_left[3];
int recon_left_stride[2];
/* Y,U,V,Y2 */
ENTROPY_CONTEXT_PLANES *above_context;
ENTROPY_CONTEXT_PLANES *left_context;
......
......@@ -153,24 +153,23 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
}
#endif
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv_s(xd,
xd->dst.u_buffer - xd->dst.uv_stride,
xd->dst.v_buffer - xd->dst.uv_stride,
xd->dst.u_buffer - 1,
xd->dst.v_buffer - 1,
xd->dst.uv_stride,
xd->recon_above[1],
xd->recon_above[2],
xd->recon_left[1],
xd->recon_left[2],
xd->recon_left_stride[1],
xd->dst.u_buffer, xd->dst.v_buffer);
if (mode != B_PRED)
{
vp8_build_intra_predictors_mby_s(xd,
xd->dst.y_buffer - xd->dst.y_stride,
xd->dst.y_buffer - 1,
xd->dst.y_stride,
xd->recon_above[0],
xd->recon_left[0],
xd->recon_left_stride[0],
xd->dst.y_buffer);
}
else
......@@ -183,7 +182,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
if(xd->mode_info_context->mbmi.mb_skip_coeff)
vpx_memset(xd->eobs, 0, 25);
intra_prediction_down_copy(xd, xd->dst.y_buffer - dst_stride + 16);
intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
for (i = 0; i < 16; i++)
{
......@@ -383,6 +382,22 @@ static void decode_mb_rows(VP8D_COMP *pbi)
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;
xd->recon_left[0] = xd->recon_above[0] - 1;
xd->recon_left[1] = xd->recon_above[1] - 1;
xd->recon_left[2] = xd->recon_above[2] - 1;
xd->recon_above[0] -= xd->dst.y_stride;
xd->recon_above[1] -= xd->dst.uv_stride;
xd->recon_above[2] -= xd->dst.uv_stride;
//TODO: move to outside row loop
xd->recon_left_stride[0] = xd->dst.y_stride;
xd->recon_left_stride[1] = xd->dst.uv_stride;
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
/* Distance of Mb to the various image edges.
......@@ -437,6 +452,14 @@ static void decode_mb_rows(VP8D_COMP *pbi)
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
xd->recon_above[0] += 16;
xd->recon_above[1] += 8;
xd->recon_above[2] += 8;
xd->recon_left[0] += 16;
xd->recon_left[1] += 8;
xd->recon_left[2] += 8;
recon_yoffset += 16;
recon_uvoffset += 8;
......
......@@ -82,11 +82,13 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_D
pbi->mt_current_mb_col[i]=-1;
}
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col)
static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, unsigned int mb_idx)
{
int throw_residual = 0;
MB_PREDICTION_MODE mode;
int i;
#if CONFIG_ERROR_CONCEALMENT
int corruption_detected = 0;
#endif
if (xd->mode_info_context->mbmi.mb_skip_coeff)
{
......@@ -101,208 +103,196 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int m
xd->mode_info_context->mbmi.mb_skip_coeff = (eobtotal==0);
}
mode = xd->mode_info_context->mbmi.mode;
if (xd->segmentation_enabled)
mb_init_dequantizer(pbi, xd);
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
#if CONFIG_ERROR_CONCEALMENT
if(pbi->ec_active)
{
if (pbi->common.filter_level)
int throw_residual;
/* When we have independent partitions we can apply residual even
* though other partitions within the frame are corrupt.
*/
throw_residual = (!pbi->independent_partitions &&
pbi->frame_corrupt_residual);
throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
if ((mb_idx >= pbi->mvs_corrupt_from_mb || throw_residual))
{
unsigned char *uabove_row;
unsigned char *vabove_row;
unsigned char * uleft_col;
unsigned char * vleft_col;
uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
uleft_col = pbi->mt_uleft_col[mb_row];
vleft_col = pbi->mt_vleft_col[mb_row];
vp8_build_intra_predictors_mbuv_s(xd, uabove_row,
vabove_row,
uleft_col,
vleft_col,
1,
xd->dst.u_buffer, xd->dst.v_buffer);
if (xd->mode_info_context->mbmi.mode != B_PRED)
{
unsigned char *yabove_row;
unsigned char *yleft_col;
/* MB with corrupt residuals or corrupt mode/motion vectors.
* Better to use the predictor as reconstruction.
*/
pbi->frame_corrupt_residual = 1;
vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
vp8_conceal_corrupt_mb(xd);
yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
yleft_col = pbi->mt_yleft_col[mb_row];
vp8_build_intra_predictors_mby_s(xd,
yabove_row,
yleft_col,
1,
xd->dst.y_buffer);
}
corruption_detected = 1;
/* force idct to be skipped for B_PRED and use the
* prediction only for reconstruction
* */
vpx_memset(xd->eobs, 0, 25);
}
}
#endif
/* do prediction */
if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
{
vp8_build_intra_predictors_mbuv_s(xd,
xd->recon_above[1],
xd->recon_above[2],
xd->recon_left[1],
xd->recon_left[2],
xd->recon_left_stride[1],
xd->dst.u_buffer, xd->dst.v_buffer);
if (mode != B_PRED)
{
vp8_build_intra_predictors_mby_s(xd,
xd->recon_above[0],
xd->recon_left[0],
xd->recon_left_stride[0],
xd->dst.y_buffer);
}
else
{
vp8_build_intra_predictors_mbuv_s(xd,
xd->dst.u_buffer - xd->dst.uv_stride,
xd->dst.v_buffer - xd->dst.uv_stride,
xd->dst.u_buffer - 1,
xd->dst.v_buffer - 1,
xd->dst.uv_stride,
xd->dst.u_buffer, xd->dst.v_buffer);
if (xd->mode_info_context->mbmi.mode != B_PRED)
short *DQC = xd->dequant_y1;
int dst_stride = xd->dst.y_stride;
unsigned char *base_dst = xd->dst.y_buffer;
/* clear out residual eob info */
if(xd->mode_info_context->mbmi.mb_skip_coeff)
vpx_memset(xd->eobs, 0, 25);
intra_prediction_down_copy(xd, xd->recon_above[0] + 16);
for (i = 0; i < 16; i++)
{
vp8_build_intra_predictors_mby_s(xd,
xd->dst.y_buffer - xd->dst.y_stride,
xd->dst.y_buffer - 1,
xd->dst.y_stride,
xd->dst.y_buffer);
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode;
unsigned char *yabove;
unsigned char *yleft;
int left_stride;
unsigned char top_left;
/*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
if (i < 4 && pbi->common.filter_level)
yabove = xd->recon_above[0] + b->offset; //i*4;
else
yabove = (base_dst - dst_stride) + b->offset;
if (i%4==0 && pbi->common.filter_level)
{
yleft = xd->recon_left[0] + i;
left_stride = 1;
}
else
{
yleft = (base_dst - 1) + b->offset;
left_stride = dst_stride;
}
if ((i==4 || i==8 || i==12) && pbi->common.filter_level)
top_left = *(xd->recon_left[0] + i - 1);
else
top_left = yabove[-1];
vp8_intra4x4_predict_d_c(yabove, yleft, left_stride,
b_mode,
base_dst + b->offset, dst_stride,
top_left);
if (xd->eobs[i] )
{
if (xd->eobs[i] > 1)
{
vp8_dequant_idct_add
(b->qcoeff, DQC,
base_dst + b->offset, dst_stride);
}
else
{
vp8_dc_only_idct_add
(b->qcoeff[0] * DQC[0],
base_dst + b->offset, dst_stride,
base_dst + b->offset, dst_stride);
((int *)b->qcoeff)[0] = 0;
}
}
}
}
}
else
{
vp8_build_inter_predictors_mb(xd);
}
/* When we have independent partitions we can apply residual even
* though other partitions within the frame are corrupt.
*/
throw_residual = (!pbi->independent_partitions &&
pbi->frame_corrupt_residual);
throw_residual = (throw_residual || vp8dx_bool_error(xd->current_bc));
#if CONFIG_ERROR_CONCEALMENT
if (pbi->ec_active &&
(mb_row * pbi->common.mb_cols + mb_col >= pbi->mvs_corrupt_from_mb ||
throw_residual))
if (corruption_detected)
{
/* MB with corrupt residuals or corrupt mode/motion vectors.
* Better to use the predictor as reconstruction.
*/
pbi->frame_corrupt_residual = 1;
vpx_memset(xd->qcoeff, 0, sizeof(xd->qcoeff));
vp8_conceal_corrupt_mb(xd);
return;
}
#endif
/* dequantization and idct */
if (xd->mode_info_context->mbmi.mode == B_PRED)
if(!xd->mode_info_context->mbmi.mb_skip_coeff)
{
short *DQC = xd->dequant_y1;
int dst_stride = xd->dst.y_stride;
unsigned char *base_dst = xd->dst.y_buffer;
unsigned char *above_right_src;
if (pbi->common.filter_level)
above_right_src = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16;
else
above_right_src = xd->dst.y_buffer - dst_stride + 16;
intra_prediction_down_copy(xd, above_right_src);
for (i = 0; i < 16; i++)
/* dequantization and idct */
if (mode != B_PRED)
{
BLOCKD *b = &xd->block[i];
int b_mode = xd->mode_info_context->bmi[i].as_mode;
unsigned char *yabove;
unsigned char *yleft;
int left_stride;
unsigned char top_left;
/*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
if (i < 4 && pbi->common.filter_level)
yabove = pbi->mt_yabove_row[mb_row] + mb_col*16 + i*4 + 32;
else
yabove = base_dst + b->offset - dst_stride;
short *DQC = xd->dequant_y1;
if (i%4==0 && pbi->common.filter_level)
if (mode != SPLITMV)
{
yleft = pbi->mt_yleft_col[mb_row] + i;
left_stride = 1;
}
else
{
yleft = base_dst + b->offset - 1;
left_stride = dst_stride;
}
if ((i==4 || i==8 || i==12) && pbi->common.filter_level)
top_left = pbi->mt_yleft_col[mb_row][i-1];
else
top_left = yabove[-1];
vp8_intra4x4_predict_d_c(yabove, yleft, left_stride,
b_mode,
base_dst + b->offset, dst_stride,
top_left);
BLOCKD *b = &xd->block[24];
if (xd->eobs[i] )
{
if (xd->eobs[i] > 1)
/* do 2nd order transform on the dc block */
if (xd->eobs[24] > 1)
{
vp8_dequant_idct_add
(b->qcoeff, DQC,
base_dst + b->offset, dst_stride);
vp8_dequantize_b(b, xd->dequant_y2);
vp8_short_inv_walsh4x4(&b->dqcoeff[0],
xd->qcoeff);
((int *)b->qcoeff)[0] = 0;
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
}
else
{
vp8_dc_only_idct_add
(b->qcoeff[0] * DQC[0],
base_dst + b->offset, dst_stride,
base_dst + b->offset, dst_stride);
b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
vp8_short_inv_walsh4x4_1(&b->dqcoeff[0],
xd->qcoeff);
((int *)b->qcoeff)[0] = 0;
}
}
}
}
else
{
short *DQC = xd->dequant_y1;
if (xd->mode_info_context->mbmi.mode != SPLITMV)
{
BLOCKD *b = &xd->block[24];
/* do 2nd order transform on the dc block */
if (xd->eobs[24] > 1)
{
vp8_dequantize_b(b, xd->dequant_y2);
vp8_short_inv_walsh4x4(&b->dqcoeff[0],
xd->qcoeff);
((int *)b->qcoeff)[0] = 0;
((int *)b->qcoeff)[1] = 0;
((int *)b->qcoeff)[2] = 0;
((int *)b->qcoeff)[3] = 0;
((int *)b->qcoeff)[4] = 0;
((int *)b->qcoeff)[5] = 0;
((int *)b->qcoeff)[6] = 0;
((int *)b->qcoeff)[7] = 0;
}
else
{
b->dqcoeff[0] = b->qcoeff[0] * xd->dequant_y2[0];
vp8_short_inv_walsh4x4_1(&b->dqcoeff[0], xd->qcoeff);
((int *)b->qcoeff)[0] = 0;
/* override the dc dequant constant in order to preserve the
* dc components
*/
DQC = xd->dequant_y1_dc;
}
/* override the dc dequant constant */
DQC = xd->dequant_y1_dc;
vp8_dequant_idct_add_y_block
(xd->qcoeff, DQC,
xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
}
vp8_dequant_idct_add_y_block
(xd->qcoeff, DQC,
xd->dst.y_buffer,
xd->dst.y_stride, xd->eobs);
vp8_dequant_idct_add_uv_block
(xd->qcoeff+16*16, xd->dequant_uv,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16);
}
vp8_dequant_idct_add_uv_block
(xd->qcoeff+16*16, xd->dequant_uv,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs+16);
}
typedef void (*init_current_bc_fn_t)(VP8D_COMP *pbi, MACROBLOCKD *xd,
......@@ -392,6 +382,39 @@ static void decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row,
xd->mb_to_top_edge = -((mb_row * 16)) << 3;
xd->mb_to_bottom_edge = ((pc->mb_rows - 1 - mb_row) * 16) << 3;
if (pbi->common.filter_level)
{
xd->recon_above[0] = pbi->mt_yabove_row[mb_row] + 0*16 +32;
xd->recon_above[1] = pbi->mt_uabove_row[mb_row] + 0*8 +16;
xd->recon_above[2] = pbi->mt_vabove_row[mb_row] + 0*8 +16;
xd->recon_left[0] = pbi->mt_yleft_col[mb_row];
xd->recon_left[1] = pbi->mt_uleft_col[mb_row];
xd->recon_left[2] = pbi->mt_vleft_col[mb_row];
//TODO: move to outside row loop
xd->recon_left_stride[0] = 1;
xd->recon_left_stride[1] = 1;
}
else
{
xd->recon_above[0] = dst_buffer[0] + recon_yoffset;
xd->recon_above[1] = dst_buffer[1] + recon_uvoffset;
xd->recon_above[2] = dst_buffer[2] + recon_uvoffset;
xd->recon_left[0] = xd->recon_above[0] - 1;
xd->recon_left[1] = xd->recon_above[1] - 1;
xd->recon_left[2] = xd->recon_above[2] - 1;
xd->recon_above[0] -= xd->dst.y_stride;
xd->recon_above[1] -= xd->dst.uv_stride;
xd->recon_above[2] -= xd->dst.uv_stride;
//TODO: move to outside row loop
xd->recon_left_stride[0] = xd->dst.y_stride;
xd->recon_left_stride[1] = xd->dst.uv_stride;
}
for (mb_col = 0; mb_col < pc->mb_cols; mb_col++)
{
if ( mb_row > 0 && (mb_col & (nsync-1)) == 0)
......@@ -452,13 +475,24 @@ static void decode_mb_rows(VP8D_COMP *pbi, MACROBLOCKD *xd, int start_mb_row,
/* propagate errors from reference frames */
xd->corrupted |= ref_fb_corrupted[xd->mode_info_context->mbmi.ref_frame];
decode_macroblock(pbi, xd, mb_row, mb_col);
decode_macroblock(pbi, xd, 0);
xd->left_available = 1;
/* check if the boolean decoder has suffered an error */
xd->corrupted |= vp8dx_bool_error(xd->current_bc);
xd->recon_above[0] += 16;
xd->recon_above[1] += 8;
xd->recon_above[2] += 8;
if (!pbi->common.filter_level)
{
xd->recon_left[0] += 16;
xd->recon_left[1] += 8;
xd->recon_left[2] += 8;
}
if (pbi->common.filter_level)
{
int skip_lf = (xd->mode_info_context->mbmi.mode != B_PRED &&
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment