Commit 460501fe authored by Ronald S. Bultje, committed by Gerrit Code Review

Merge "Merge superblocks64 experiment." into experimental

parents 51bae955 6884a83f
@@ -239,8 +239,6 @@ HAVE_LIST="
 EXPERIMENT_LIST="
     csm
     comp_intra_pred
-    superblocks
-    superblocks64
     pred_filter
     lossless
     subpelrefmv
...
@@ -232,9 +232,7 @@ typedef enum {
 typedef enum {
   BLOCK_SIZE_MB16X16 = 0,
   BLOCK_SIZE_SB32X32 = 1,
-#if CONFIG_SUPERBLOCKS64
   BLOCK_SIZE_SB64X64 = 2,
-#endif
 } BLOCK_SIZE_TYPE;

 typedef struct {
...
@@ -198,7 +198,6 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                                    above_src + 16, xd->dst.y_stride, &sse);
       score += sse;
     }
-#if CONFIG_SUPERBLOCKS64
     if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
       vp9_sub_pixel_variance16x2(above_ref + offset + 32,
                                  ref_y_stride,
@@ -213,7 +212,6 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                                  above_src + 48, xd->dst.y_stride, &sse);
       score += sse;
     }
-#endif
   }
   if (xd->left_available) {
     vp9_sub_pixel_variance2x16_c(left_ref + offset, ref_y_stride,
@@ -230,7 +228,6 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                                  xd->dst.y_stride, &sse);
       score += sse;
     }
-#if CONFIG_SUPERBLOCKS64
     if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
       vp9_sub_pixel_variance2x16_c(left_ref + offset + ref_y_stride * 32,
                                    ref_y_stride,
@@ -247,7 +244,6 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                                    xd->dst.y_stride, &sse);
       score += sse;
     }
-#endif
   }
#else
   row_offset = (this_mv.as_mv.row > 0) ?
@@ -263,14 +259,12 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
       score += vp9_sad16x3(above_src + 16, xd->dst.y_stride,
                            above_ref + offset + 16, ref_y_stride);
     }
-#if CONFIG_SUPERBLOCKS64
     if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
       score += vp9_sad16x3(above_src + 32, xd->dst.y_stride,
                            above_ref + offset + 32, ref_y_stride);
       score += vp9_sad16x3(above_src + 48, xd->dst.y_stride,
                            above_ref + offset + 48, ref_y_stride);
     }
-#endif
   }
   if (xd->left_available) {
     score += vp9_sad3x16(left_src, xd->dst.y_stride,
@@ -281,7 +275,6 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                          left_ref + offset + ref_y_stride * 16,
                          ref_y_stride);
     }
-#if CONFIG_SUPERBLOCKS64
     if (xd->mode_info_context->mbmi.sb_type >= BLOCK_SIZE_SB64X64) {
       score += vp9_sad3x16(left_src + xd->dst.y_stride * 32,
                            xd->dst.y_stride,
@@ -292,7 +285,6 @@ void vp9_find_best_ref_mvs(MACROBLOCKD *xd,
                            left_ref + offset + ref_y_stride * 48,
                            ref_y_stride);
     }
-#endif
   }
#endif
   // Add the entry to our list and then resort the list on score.
...
@@ -240,9 +240,7 @@ typedef struct VP9Common {
   vp9_prob prob_last_coded;
   vp9_prob prob_gf_coded;
   vp9_prob sb32_coded;
-#if CONFIG_SUPERBLOCKS64
   vp9_prob sb64_coded;
-#endif  // CONFIG_SUPERBLOCKS64

   // Context probabilities when using predictive coding of segment id
   vp9_prob segment_pred_probs[PREDICTION_PROBS];
...
@@ -614,7 +614,6 @@ void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
#endif
}

-#if CONFIG_SUPERBLOCKS64
void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
                                        uint8_t *dst_y,
                                        uint8_t *dst_u,
@@ -678,7 +677,6 @@ void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
  }
#endif
}
-#endif  // CONFIG_SUPERBLOCKS64

/*
 * The following functions should be called after an initial
...
@@ -54,14 +54,12 @@ extern void vp9_build_inter32x32_predictors_sb(MACROBLOCKD *x,
                                               int dst_ystride,
                                               int dst_uvstride);

-#if CONFIG_SUPERBLOCKS64
extern void vp9_build_inter64x64_predictors_sb(MACROBLOCKD *x,
                                               uint8_t *dst_y,
                                               uint8_t *dst_u,
                                               uint8_t *dst_v,
                                               int dst_ystride,
                                               int dst_uvstride);
-#endif  // CONFIG_SUPERBLOCKS64

extern void vp9_build_inter_predictors_mb(MACROBLOCKD *xd);
...
@@ -703,7 +703,6 @@ void vp9_build_interintra_32x32_predictors_sb(MACROBLOCKD *xd,
  vp9_build_interintra_32x32_predictors_sbuv(xd, upred, vpred, uvstride);
}

-#if CONFIG_SUPERBLOCKS64
void vp9_build_interintra_64x64_predictors_sby(MACROBLOCKD *xd,
                                               uint8_t *ypred,
                                               int ystride) {
@@ -744,7 +743,6 @@ void vp9_build_interintra_64x64_predictors_sb(MACROBLOCKD *xd,
  vp9_build_interintra_64x64_predictors_sby(xd, ypred, ystride);
  vp9_build_interintra_64x64_predictors_sbuv(xd, upred, vpred, uvstride);
}
-#endif  // CONFIG_SUPERBLOCKS64
#endif  // CONFIG_COMP_INTERINTRA_PRED

void vp9_build_intra_predictors_mby(MACROBLOCKD *xd) {
@@ -768,14 +766,12 @@ void vp9_build_intra_predictors_sby_s(MACROBLOCKD *xd) {
                                      xd->up_available, xd->left_available);
}

-#if CONFIG_SUPERBLOCKS64
void vp9_build_intra_predictors_sb64y_s(MACROBLOCKD *xd) {
  vp9_build_intra_predictors_internal(xd->dst.y_buffer, xd->dst.y_stride,
                                      xd->dst.y_buffer, xd->dst.y_stride,
                                      xd->mode_info_context->mbmi.mode, 64,
                                      xd->up_available, xd->left_available);
}
-#endif  // CONFIG_SUPERBLOCKS64

#if CONFIG_COMP_INTRA_PRED
void vp9_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
@@ -834,14 +830,12 @@ void vp9_build_intra_predictors_sbuv_s(MACROBLOCKD *xd) {
                                           16);
}

-#if CONFIG_SUPERBLOCKS64
void vp9_build_intra_predictors_sb64uv_s(MACROBLOCKD *xd) {
  vp9_build_intra_predictors_mbuv_internal(xd, xd->dst.u_buffer,
                                           xd->dst.v_buffer, xd->dst.uv_stride,
                                           xd->mode_info_context->mbmi.uv_mode,
                                           32);
}
-#endif  // CONFIG_SUPERBLOCKS64

#if CONFIG_COMP_INTRA_PRED
void vp9_build_comp_intra_predictors_mbuv(MACROBLOCKD *xd) {
...
@@ -172,16 +172,12 @@ specialize vp9_build_intra_predictors_mbuv_s;
prototype void vp9_build_comp_intra_predictors_mbuv "struct macroblockd *x"
specialize vp9_build_comp_intra_predictors_mbuv;

-if [ "$CONFIG_SUPERBLOCKS64" = "yes" ]; then
prototype void vp9_build_intra_predictors_sb64y_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_sb64y_s;

prototype void vp9_build_intra_predictors_sb64uv_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_sb64uv_s;
-fi

prototype void vp9_intra4x4_predict "struct blockd *x, int b_mode, uint8_t *predictor"
specialize vp9_intra4x4_predict;
...
@@ -172,13 +172,10 @@ static void mb_init_dequantizer(VP9D_COMP *pbi, MACROBLOCKD *xd) {
 */
static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) {
  if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME) {
-#if CONFIG_SUPERBLOCKS64
    if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
      vp9_build_intra_predictors_sb64uv_s(xd);
      vp9_build_intra_predictors_sb64y_s(xd);
-    } else
-#endif  // CONFIG_SUPERBLOCKS64
-    if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
+    } else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
      vp9_build_intra_predictors_sbuv_s(xd);
      vp9_build_intra_predictors_sby_s(xd);
    } else {
@@ -186,7 +183,6 @@ static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) {
      vp9_build_intra_predictors_mby_s(xd);
    }
  } else {
-#if CONFIG_SUPERBLOCKS64
    if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
      vp9_build_inter64x64_predictors_sb(xd,
                                         xd->dst.y_buffer,
@@ -194,9 +190,7 @@ static void skip_recon_mb(VP9D_COMP *pbi, MACROBLOCKD *xd) {
                                         xd->dst.v_buffer,
                                         xd->dst.y_stride,
                                         xd->dst.uv_stride);
-    } else
-#endif  // CONFIG_SUPERBLOCKS64
-    if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
+    } else if (xd->mode_info_context->mbmi.sb_type == BLOCK_SIZE_SB32X32) {
      vp9_build_inter32x32_predictors_sb(xd,
                                         xd->dst.y_buffer,
                                         xd->dst.u_buffer,
@@ -701,7 +695,6 @@ static void decode_4x4_sb(VP9D_COMP *pbi, MACROBLOCKD *xd,
                    xd->dst.uv_stride, xd->eobs + 16, xd);
};

-#if CONFIG_SUPERBLOCKS64
static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
                                int mb_row, unsigned int mb_col,
                                BOOL_DECODER* const bc) {
@@ -831,7 +824,6 @@ static void decode_superblock64(VP9D_COMP *pbi, MACROBLOCKD *xd,
  xd->left_context = pc->left_context;
  xd->mode_info_context = orig_mi;
}
-#endif  // CONFIG_SUPERBLOCKS64

static void decode_superblock32(VP9D_COMP *pbi, MACROBLOCKD *xd,
                                int mb_row, unsigned int mb_col,
@@ -1184,16 +1176,13 @@ static void decode_sb_row(VP9D_COMP *pbi, VP9_COMMON *pc,
    vpx_memset(pc->left_context, 0, sizeof(pc->left_context));
    for (mb_col = 0; mb_col < pc->mb_cols; mb_col += 4) {
-#if CONFIG_SUPERBLOCKS64
      if (vp9_read(bc, pc->sb64_coded)) {
        set_offsets(pbi, 64, mb_row, mb_col);
        vp9_decode_mb_mode_mv(pbi, xd, mb_row, mb_col, bc);
        set_refs(pbi, 64, mb_row, mb_col);
        decode_superblock64(pbi, xd, mb_row, mb_col, bc);
        xd->corrupted |= bool_error(bc);
-      } else
-#endif  // CONFIG_SUPERBLOCKS64
-      {
+      } else {
        int j;
        for (j = 0; j < 4; j++) {
@@ -1596,9 +1585,7 @@ int vp9_decode_frame(VP9D_COMP *pbi, const unsigned char **p_data_end) {
    }
  }
-#if CONFIG_SUPERBLOCKS64
  pc->sb64_coded = vp9_read_literal(&header_bc, 8);
-#endif
  pc->sb32_coded = vp9_read_literal(&header_bc, 8);

  /* Read the loop filter level and type */
...
@@ -1122,13 +1122,10 @@ static void write_modes(VP9_COMP *cpi, vp9_writer* const bc) {
  for (mb_row = 0; mb_row < c->mb_rows; mb_row += 4, m_ptr += 4 * mis) {
    m = m_ptr;
    for (mb_col = 0; mb_col < c->mb_cols; mb_col += 4, m += 4) {
-#if CONFIG_SUPERBLOCKS64
      vp9_write(bc, m->mbmi.sb_type == BLOCK_SIZE_SB64X64, c->sb64_coded);
      if (m->mbmi.sb_type == BLOCK_SIZE_SB64X64) {
        write_modes_b(cpi, m, bc, &tok, tok_end, mb_row, mb_col);
-      } else
-#endif
-      {
+      } else {
        int j;
        for (j = 0; j < 4; j++) {
@@ -1689,10 +1686,8 @@ void vp9_pack_bitstream(VP9_COMP *cpi, unsigned char *dest,
    }
  }

-#if CONFIG_SUPERBLOCKS64
  pc->sb64_coded = get_binary_prob(cpi->sb64_count[0], cpi->sb64_count[1]);
  vp9_write_literal(&header_bc, pc->sb64_coded, 8);
-#endif
  pc->sb32_coded = get_binary_prob(cpi->sb32_count[0], cpi->sb32_count[1]);
  vp9_write_literal(&header_bc, pc->sb32_coded, 8);
...
@@ -180,9 +180,7 @@ typedef struct macroblock {
  PICK_MODE_CONTEXT mb_context[4][4];
  // when 4 MBs share coding parameters:
  PICK_MODE_CONTEXT sb32_context[4];
-#if CONFIG_SUPERBLOCKS64
  PICK_MODE_CONTEXT sb64_context;
-#endif  // CONFIG_SUPERBLOCKS64

  void (*vp9_short_fdct4x4)(int16_t *input, int16_t *output, int pitch);
  void (*vp9_short_fdct8x4)(int16_t *input, int16_t *output, int pitch);
...
@@ -888,7 +888,6 @@ static void pick_sb_modes(VP9_COMP *cpi,
  }
}

-#if CONFIG_SUPERBLOCKS64
static void pick_sb64_modes(VP9_COMP *cpi,
                            int mb_row,
                            int mb_col,
@@ -924,7 +923,6 @@ static void pick_sb64_modes(VP9_COMP *cpi,
                  totaldist);
  }
}
-#endif  // CONFIG_SUPERBLOCKS64

static void update_stats(VP9_COMP *cpi) {
  VP9_COMMON *const cm = &cpi->common;
@@ -1054,7 +1052,6 @@ static void encode_sb(VP9_COMP *cpi,
#endif
}

-#if CONFIG_SUPERBLOCKS64
static void encode_sb64(VP9_COMP *cpi,
                        int mb_row,
                        int mb_col,
@@ -1094,7 +1091,6 @@ static void encode_sb64(VP9_COMP *cpi,
      }
    }
  }
-#endif  // CONFIG_SUPERBLOCKS64

static void encode_sb_row(VP9_COMP *cpi,
                          int mb_row,
@@ -1114,14 +1110,12 @@ static void encode_sb_row(VP9_COMP *cpi,
    int i;
    int sb32_rate = 0, sb32_dist = 0;
    int is_sb[4];
-#if CONFIG_SUPERBLOCKS64
    int sb64_rate = INT_MAX, sb64_dist;
    ENTROPY_CONTEXT_PLANES l[4], a[4];
    TOKENEXTRA *tp_orig = *tp;

    memcpy(&a, cm->above_context + mb_col, sizeof(a));
    memcpy(&l, cm->left_context, sizeof(l));
-#endif  // CONFIG_SUPERBLOCKS64
    for (i = 0; i < 4; i++) {
      const int x_idx = (i & 1) << 1, y_idx = i & 2;
      int mb_rate = 0, mb_dist = 0;
@@ -1163,11 +1157,9 @@ static void encode_sb_row(VP9_COMP *cpi,
      // pixels of the lower level; also, inverting SB/MB order (big->small
      // instead of small->big) means we can use as threshold for small, which
      // may enable breakouts if RD is not good enough (i.e. faster)
-      encode_sb(cpi, mb_row + y_idx, mb_col + x_idx,
-                !CONFIG_SUPERBLOCKS64, tp, is_sb[i]);
+      encode_sb(cpi, mb_row + y_idx, mb_col + x_idx, 0, tp, is_sb[i]);
    }
-#if CONFIG_SUPERBLOCKS64
    memcpy(cm->above_context + mb_col, &a, sizeof(a));
    memcpy(cm->left_context, &l, sizeof(l));
    sb32_rate += vp9_cost_bit(cm->sb64_coded, 0);
@@ -1184,17 +1176,13 @@ static void encode_sb_row(VP9_COMP *cpi,
            RDCOST(x->rdmult, x->rddiv, sb32_rate, sb32_dist)) {
      is_sb[0] = 2;
      *totalrate += sb64_rate;
-    } else
-#endif
-    {
+    } else {
      *totalrate += sb32_rate;
    }
-#if CONFIG_SUPERBLOCKS64
    assert(tp_orig == *tp);
    encode_sb64(cpi, mb_row, mb_col, tp, is_sb);
    assert(tp_orig < *tp);
-#endif  // CONFIG_SUPERBLOCKS64
  }
}
@@ -1244,9 +1232,7 @@ static void init_encode_frame_mb_context(VP9_COMP *cpi) {
  vp9_zero(cpi->common.fc.mv_ref_ct)
  vp9_zero(cpi->sb_ymode_count)
  vp9_zero(cpi->sb32_count);
-#if CONFIG_SUPERBLOCKS64