Commit 11fec186 authored by Ronald S. Bultje's avatar Ronald S. Bultje Committed by Gerrit Code Review
Browse files

Merge "Remove 'thismb' data pointer when superblock experiment is on." into experimental

parents 6fb8953c 73987d14
...@@ -86,9 +86,11 @@ typedef struct { ...@@ -86,9 +86,11 @@ typedef struct {
typedef struct macroblock { typedef struct macroblock {
DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y DECLARE_ALIGNED(16, short, src_diff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y DECLARE_ALIGNED(16, short, coeff[400]); // 16x16 Y 8x8 U 8x8 V 4x4 2nd Y
#if !CONFIG_SUPERBLOCKS
DECLARE_ALIGNED(16, unsigned char, thismb[256]); // 16x16 Y DECLARE_ALIGNED(16, unsigned char, thismb[256]); // 16x16 Y
unsigned char *thismb_ptr; unsigned char *thismb_ptr;
#endif
// 16 Y blocks, 4 U blocks, 4 V blocks, // 16 Y blocks, 4 U blocks, 4 V blocks,
// 1 DC 2nd order block each with 16 entries // 1 DC 2nd order block each with 16 entries
BLOCK block[25]; BLOCK block[25];
......
...@@ -294,8 +294,10 @@ static void build_activity_map(VP9_COMP *cpi) { ...@@ -294,8 +294,10 @@ static void build_activity_map(VP9_COMP *cpi) {
xd->left_available = (mb_col != 0); xd->left_available = (mb_col != 0);
recon_yoffset += 16; recon_yoffset += 16;
#endif #endif
#if !CONFIG_SUPERBLOCKS
// Copy current mb to a buffer // Copy current mb to a buffer
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
// measure activity // measure activity
mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col); mb_activity = mb_activity_measure(cpi, x, mb_row, mb_col);
...@@ -575,8 +577,10 @@ static void pick_mb_modes(VP9_COMP *cpi, ...@@ -575,8 +577,10 @@ static void pick_mb_modes(VP9_COMP *cpi,
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if !CONFIG_SUPERBLOCKS
// Copy current MB to a work buffer // Copy current MB to a work buffer
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
x->rddiv = cpi->RDDIV; x->rddiv = cpi->RDDIV;
x->rdmult = cpi->RDMULT; x->rdmult = cpi->RDMULT;
...@@ -953,8 +957,10 @@ static void encode_sb(VP9_COMP *cpi, ...@@ -953,8 +957,10 @@ static void encode_sb(VP9_COMP *cpi,
xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset; xd->dst.u_buffer = cm->yv12_fb[dst_fb_idx].u_buffer + recon_uvoffset;
xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset; xd->dst.v_buffer = cm->yv12_fb[dst_fb_idx].v_buffer + recon_uvoffset;
#if !CONFIG_SUPERBLOCKS
// Copy current MB to a work buffer // Copy current MB to a work buffer
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
if (cpi->oxcf.tuning == VP8_TUNE_SSIM) if (cpi->oxcf.tuning == VP8_TUNE_SSIM)
vp9_activity_masking(cpi, x); vp9_activity_masking(cpi, x);
...@@ -1694,6 +1700,7 @@ void vp9_build_block_offsets(MACROBLOCK *x) { ...@@ -1694,6 +1700,7 @@ void vp9_build_block_offsets(MACROBLOCK *x) {
vp9_build_block_doffsets(&x->e_mbd); vp9_build_block_doffsets(&x->e_mbd);
#if !CONFIG_SUPERBLOCKS
// y blocks // y blocks
x->thismb_ptr = &x->thismb[0]; x->thismb_ptr = &x->thismb[0];
for (br = 0; br < 4; br++) { for (br = 0; br < 4; br++) {
...@@ -1708,6 +1715,20 @@ void vp9_build_block_offsets(MACROBLOCK *x) { ...@@ -1708,6 +1715,20 @@ void vp9_build_block_offsets(MACROBLOCK *x) {
++block; ++block;
} }
} }
#else
for (br = 0; br < 4; br++) {
for (bc = 0; bc < 4; bc++) {
BLOCK *this_block = &x->block[block];
// this_block->base_src = &x->src.y_buffer;
// this_block->src_stride = x->src.y_stride;
// this_block->src = 4 * br * this_block->src_stride + 4 * bc;
this_block->base_src = &x->src.y_buffer;
this_block->src_stride = x->src.y_stride;
this_block->src = 4 * br * this_block->src_stride + 4 * bc;
++block;
}
}
#endif
// u blocks // u blocks
for (br = 0; br < 2; br++) { for (br = 0; br < 2; br++) {
......
...@@ -521,8 +521,10 @@ void vp9_first_pass(VP9_COMP *cpi) { ...@@ -521,8 +521,10 @@ void vp9_first_pass(VP9_COMP *cpi) {
xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset; xd->dst.v_buffer = new_yv12->v_buffer + recon_uvoffset;
xd->left_available = (mb_col != 0); xd->left_available = (mb_col != 0);
#if !CONFIG_SUPERBLOCKS
// Copy current mb to a buffer // Copy current mb to a buffer
vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16); vp9_copy_mem16x16(x->src.y_buffer, x->src.y_stride, x->thismb, 16);
#endif
// do intra 16x16 prediction // do intra 16x16 prediction
this_error = vp9_encode_intra(cpi, x, use_dc_pred); this_error = vp9_encode_intra(cpi, x, use_dc_pred);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment