Commit 1f80a568 authored by John Koleszar

Make vp9_optimize_sb* common

Unify the various vp9_optimize_sb functions into one that handles all
transform sizes.

Change-Id: I48b642fbfb3e72cc2e0bcf1d0317a80a80547882
parent d068d869
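Concretely, call sites stop choosing between vp9_optimize_sby_4x4/_8x8/_16x16/_32x32 (and the sbuv equivalents) and instead call a single vp9_optimize_sby or vp9_optimize_sbuv that determines the transform size itself; the bodies of the unified functions are in the part of the diff not shown here. A minimal, self-contained sketch of the resulting call-site shape, using toy types and names rather than the libvpx sources:

/* Toy sketch of the call-site shape only -- stand-in types and names, not the
 * libvpx sources. The transform/quantize steps stay size-specific; only the
 * coefficient optimization call becomes size-agnostic. */
#include <stdio.h>

typedef enum { TX_4X4, TX_8X8, TX_16X16, TX_32X32 } TOY_TX_SIZE;
struct toy_mb { TOY_TX_SIZE txfm_size; };

/* unified entry point: reads the transform size from the block state itself */
static void toy_optimize_sby(const struct toy_mb *mb) {
  static const char *tx_name[] = { "4x4", "8x8", "16x16", "32x32" };
  printf("optimize luma coefficients at %s\n", tx_name[mb->txfm_size]);
}

static void toy_encode(const struct toy_mb *mb, int optimize) {
  switch (mb->txfm_size) {
    case TX_32X32:
      /* ... transform + quantize at 32x32 ... */
      if (optimize)
        toy_optimize_sby(mb);   /* was: a dedicated _32x32 optimize call */
      break;
    case TX_16X16:
      /* ... transform + quantize at 16x16 ... */
      if (optimize)
        toy_optimize_sby(mb);   /* was: a dedicated _16x16 optimize call */
      break;
    default:
      /* ... transform + quantize at 8x8 or 4x4 ... */
      if (optimize)
        toy_optimize_sby(mb);   /* was: an _8x8 or _4x4 optimize call */
      break;
  }
}

int main(void) {
  struct toy_mb mb = { TX_16X16 };
  toy_encode(&mb, 1);   /* the caller no longer repeats the size selection */
  return 0;
}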
@@ -1014,6 +1014,74 @@ static uint8_t* raster_block_offset_uint8(MACROBLOCKD *xd,
   return base + raster_block_offset(xd, bsize, plane, block, stride);
 }
 
+static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
+                                       BLOCK_SIZE_TYPE bsize,
+                                       int plane, int block,
+                                       int ss_txfrm_size) {
+  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+  const int txwl = ss_txfrm_size / 2;
+  const int tx_cols_lg2 = bwl - txwl;
+  const int tx_cols = 1 << tx_cols_lg2;
+  const int raster_mb = block >> ss_txfrm_size;
+  const int x = (raster_mb & (tx_cols - 1)) << (txwl);
+  const int y = raster_mb >> tx_cols_lg2 << (txwl);
+  return x + (y << bwl);
+}
+
+static void txfrm_block_to_raster_xy(MACROBLOCKD *xd,
+                                     BLOCK_SIZE_TYPE bsize,
+                                     int plane, int block,
+                                     int ss_txfrm_size,
+                                     int *x, int *y) {
+  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
+  const int txwl = ss_txfrm_size / 2;
+  const int tx_cols_lg2 = bwl - txwl;
+  const int tx_cols = 1 << tx_cols_lg2;
+  const int raster_mb = block >> ss_txfrm_size;
+  *x = (raster_mb & (tx_cols - 1)) << (txwl);
+  *y = raster_mb >> tx_cols_lg2 << (txwl);
+}
+
+static TX_SIZE tx_size_for_plane(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize,
+                                 int plane) {
+  // TODO(jkoleszar): This duplicates a ton of code, but we're going to be
+  // moving this to a per-plane lookup shortly, and this will go away then.
+  if (!plane) {
+    return xd->mode_info_context->mbmi.txfm_size;
+  } else {
+    const int bw = b_width_log2(bsize), bh = b_height_log2(bsize);
+#if !CONFIG_SB8X8
+    const MB_PREDICTION_MODE mode = xd->mode_info_context->mbmi.mode;
+    const int is_split =
+        xd->mode_info_context->mbmi.txfm_size == TX_8X8 &&
+        (mode == I8X8_PRED || mode == SPLITMV);
+#endif
+    // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
+    // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
+    const TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
+    const int block_size_b = bw + bh;
+    const int txfrm_size_b = tx_size * 2;
+
+    // subsampled size of the block
+    const int ss_sum = xd->plane[plane].subsampling_x +
+                       xd->plane[plane].subsampling_y;
+    const int ss_block_size = block_size_b - ss_sum;
+
+    // size of the transform to use. scale the transform down if it's larger
+    // than the size of the subsampled data, or forced externally by the mb mode
+    const int ss_max = MAX(xd->plane[plane].subsampling_x,
+                           xd->plane[plane].subsampling_y);
+    const int ss_txfrm_size = txfrm_size_b > ss_block_size
+#if !CONFIG_SB8X8
+                                  || is_split
+#endif  // !CONFIG_SB8X8
+                                  ? txfrm_size_b - ss_max * 2
+                                  : txfrm_size_b;
+
+    return (TX_SIZE)(ss_txfrm_size / 2);
+  }
+}
+
 #if CONFIG_CODE_ZEROGROUP
 static int get_zpc_used(TX_SIZE tx_size) {
   return (tx_size >= TX_16X16);
......
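The chroma handling in tx_size_for_plane above is easier to follow with concrete numbers. The snippet below re-derives just the size bookkeeping as standalone code: the MACROBLOCKD plumbing and the !CONFIG_SB8X8 I8X8/SPLITMV special case are stripped out, and the TX_* enumerators are assumed to take the values 0..3 for 4x4..32x32.

#include <assert.h>
#include <stdio.h>

/* Sizes are log2 of the count of 4x4 units per dimension (4x4=0, 8x8=1,
 * 16x16=2, 32x32=3), and the *_b quantities sum width and height, matching
 * the comments in tx_size_for_plane above. */
enum { TX_4X4 = 0, TX_8X8 = 1, TX_16X16 = 2, TX_32X32 = 3 };

static int ss_tx_size(int tx_size, int bw, int bh, int ss_x, int ss_y) {
  const int block_size_b = bw + bh;                       /* block area       */
  const int txfrm_size_b = tx_size * 2;                   /* transform area   */
  const int ss_block_size = block_size_b - ss_x - ss_y;   /* subsampled block */
  const int ss_max = ss_x > ss_y ? ss_x : ss_y;
  /* scale the transform down if it no longer fits the subsampled plane */
  const int ss_txfrm_size =
      txfrm_size_b > ss_block_size ? txfrm_size_b - ss_max * 2 : txfrm_size_b;
  return ss_txfrm_size / 2;
}

int main(void) {
  /* 16x16 block (bw = bh = 2), TX_16X16 luma transform, 4:2:0 chroma:
   * the chroma plane is only 8x8, so its transform drops to TX_8X8. */
  assert(ss_tx_size(TX_16X16, 2, 2, 1, 1) == TX_8X8);
  /* A TX_4X4 transform already fits the 8x8 chroma plane; it is unchanged. */
  assert(ss_tx_size(TX_4X4, 2, 2, 1, 1) == TX_4X4);
  /* Luma has no subsampling, so it is never scaled down. */
  assert(ss_tx_size(TX_16X16, 2, 2, 0, 0) == TX_16X16);
  printf("transform size scaling checks passed\n");
  return 0;
}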
@@ -358,21 +358,6 @@ static void decode_4x4(VP9D_COMP *pbi, MACROBLOCKD *xd, vp9_reader *r) {
 }
 #endif
 
-static int txfrm_block_to_raster_block(MACROBLOCKD *xd,
-                                       BLOCK_SIZE_TYPE bsize,
-                                       int plane, int block,
-                                       int ss_txfrm_size) {
-  const int bwl = b_width_log2(bsize) - xd->plane[plane].subsampling_x;
-  const int txwl = ss_txfrm_size / 2;
-  const int tx_cols_lg2 = bwl - txwl;
-  const int tx_cols = 1 << tx_cols_lg2;
-  const int raster_mb = block >> ss_txfrm_size;
-  const int x = (raster_mb & (tx_cols - 1)) << (txwl);
-  const int y = raster_mb >> tx_cols_lg2 << (txwl);
-  return x + (y << bwl);
-}
-
 static void decode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
                          int ss_txfrm_size, void *arg) {
   MACROBLOCKD* const xd = arg;
......
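The helper removed here is the decoder's private copy of the block-index mapping that the first hunk adds to common code. To make the index math concrete, the snippet below re-derives it with fixed numbers; it is standalone code in which the plane and subsampling lookups are replaced by plain parameters, and all positions are counted in 4x4 blocks.

#include <assert.h>
#include <stdio.h>

/* Same index math as txfrm_block_to_raster_block / txfrm_block_to_raster_xy:
 * bwl is log2 of the plane width in 4x4 blocks, block counts 4x4 blocks, and
 * ss_txfrm_size is log2 of the number of 4x4 blocks per transform. */
static void raster_xy(int bwl, int block, int ss_txfrm_size, int *x, int *y) {
  const int txwl = ss_txfrm_size / 2;            /* transform width, log2 4x4s */
  const int tx_cols_lg2 = bwl - txwl;            /* transforms per row, log2   */
  const int tx_cols = 1 << tx_cols_lg2;
  const int raster_mb = block >> ss_txfrm_size;  /* index of the transform     */
  *x = (raster_mb & (tx_cols - 1)) << txwl;
  *y = (raster_mb >> tx_cols_lg2) << txwl;
}

int main(void) {
  /* 64x64 luma plane: 16 columns of 4x4 blocks, so bwl = 4. With 16x16
   * transforms, each transform spans 16 4x4 blocks (ss_txfrm_size = 4), so
   * block index 80 is the sixth transform: row 1, column 1 of the 4x4 grid
   * of transforms. */
  int x, y;
  raster_xy(4, 80, 4, &x, &y);
  assert(x == 4 && y == 4);      /* upper-left corner, in 4x4-block units */
  assert(x + (y << 4) == 68);    /* agrees with the offset-returning variant */
  printf("block 80 -> 4x4-block position (%d, %d), raster offset %d\n",
         x, y, x + (y << 4));
  return 0;
}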
@@ -2443,7 +2443,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
     vp9_subtract_sbuv(x, bsize);
     vp9_transform_sbuv_4x4(x, bsize);
     vp9_quantize_sbuv_4x4(x, bsize);
-    vp9_optimize_sbuv_4x4(cm, x, bsize);
+    vp9_optimize_sbuv(cm, x, bsize);
     vp9_inverse_transform_sbuv_4x4(xd, bsize);
     vp9_recon_sbuv(xd, bsize);
@@ -2507,11 +2507,11 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
         vp9_quantize_sbuv_16x16(x, bsize);
       }
       if (x->optimize) {
-        vp9_optimize_sby_32x32(cm, x, bsize);
+        vp9_optimize_sby(cm, x, bsize);
         if (bsize == BLOCK_SIZE_SB64X64)
-          vp9_optimize_sbuv_32x32(cm, x, bsize);
+          vp9_optimize_sbuv(cm, x, bsize);
         else
-          vp9_optimize_sbuv_16x16(cm, x, bsize);
+          vp9_optimize_sbuv(cm, x, bsize);
       }
       vp9_inverse_transform_sby_32x32(xd, bsize);
       if (bsize == BLOCK_SIZE_SB64X64)
@@ -2530,11 +2530,11 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
         vp9_quantize_sbuv_8x8(x, bsize);
       }
      if (x->optimize) {
-        vp9_optimize_sby_16x16(cm, x, bsize);
+        vp9_optimize_sby(cm, x, bsize);
         if (bsize >= BLOCK_SIZE_SB32X32)
-          vp9_optimize_sbuv_16x16(cm, x, bsize);
+          vp9_optimize_sbuv(cm, x, bsize);
         else
-          vp9_optimize_sbuv_8x8(cm, x, bsize);
+          vp9_optimize_sbuv(cm, x, bsize);
       }
       vp9_inverse_transform_sby_16x16(xd, bsize);
       if (bsize >= BLOCK_SIZE_SB32X32)
@@ -2546,19 +2546,19 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
       vp9_transform_sby_8x8(x, bsize);
       vp9_quantize_sby_8x8(x, bsize);
       if (x->optimize)
-        vp9_optimize_sby_8x8(cm, x, bsize);
+        vp9_optimize_sby(cm, x, bsize);
       vp9_inverse_transform_sby_8x8(xd, bsize);
       if (bsize >= BLOCK_SIZE_MB16X16) {
         vp9_transform_sbuv_8x8(x, bsize);
         vp9_quantize_sbuv_8x8(x, bsize);
         if (x->optimize)
-          vp9_optimize_sbuv_8x8(cm, x, bsize);
+          vp9_optimize_sbuv(cm, x, bsize);
         vp9_inverse_transform_sbuv_8x8(xd, bsize);
       } else {
         vp9_transform_sbuv_4x4(x, bsize);
         vp9_quantize_sbuv_4x4(x, bsize);
         if (x->optimize)
-          vp9_optimize_sbuv_4x4(cm, x, bsize);
+          vp9_optimize_sbuv(cm, x, bsize);
         vp9_inverse_transform_sbuv_4x4(xd, bsize);
       }
       break;
@@ -2568,8 +2568,8 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
      vp9_quantize_sby_4x4(x, bsize);
      vp9_quantize_sbuv_4x4(x, bsize);
      if (x->optimize) {
-        vp9_optimize_sby_4x4(cm, x, bsize);
-        vp9_optimize_sbuv_4x4(cm, x, bsize);
+        vp9_optimize_sby(cm, x, bsize);
+        vp9_optimize_sbuv(cm, x, bsize);
       }
      vp9_inverse_transform_sby_4x4(xd, bsize);
      vp9_inverse_transform_sbuv_4x4(xd, bsize);
......
@@ -114,21 +114,21 @@ void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
       vp9_transform_sby_16x16(x, BLOCK_SIZE_MB16X16);
       vp9_quantize_sby_16x16(x, BLOCK_SIZE_MB16X16);
       if (x->optimize)
-        vp9_optimize_sby_16x16(cm, x, BLOCK_SIZE_MB16X16);
+        vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
       vp9_inverse_transform_sby_16x16(xd, BLOCK_SIZE_MB16X16);
       break;
     case TX_8X8:
       vp9_transform_sby_8x8(x, BLOCK_SIZE_MB16X16);
       vp9_quantize_sby_8x8(x, BLOCK_SIZE_MB16X16);
       if (x->optimize)
-        vp9_optimize_sby_8x8(cm, x, BLOCK_SIZE_MB16X16);
+        vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
       vp9_inverse_transform_sby_8x8(xd, BLOCK_SIZE_MB16X16);
       break;
     default:
       vp9_transform_sby_4x4(x, BLOCK_SIZE_MB16X16);
       vp9_quantize_sby_4x4(x, BLOCK_SIZE_MB16X16);
       if (x->optimize)
-        vp9_optimize_sby_4x4(cm, x, BLOCK_SIZE_MB16X16);
+        vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
       vp9_inverse_transform_sby_4x4(xd, BLOCK_SIZE_MB16X16);
       break;
   }
@@ -148,14 +148,14 @@ void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
       vp9_transform_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
       vp9_quantize_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
       if (x->optimize)
-        vp9_optimize_sbuv_4x4(cm, x, BLOCK_SIZE_MB16X16);
+        vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
       vp9_inverse_transform_sbuv_4x4(xd, BLOCK_SIZE_MB16X16);
       break;
     default:  // 16x16 or 8x8
       vp9_transform_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
       vp9_quantize_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
       if (x->optimize)
-        vp9_optimize_sbuv_8x8(cm, x, BLOCK_SIZE_MB16X16);
+        vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
       vp9_inverse_transform_sbuv_8x8(xd, BLOCK_SIZE_MB16X16);
       break;
   }
......
@@ -32,29 +32,18 @@ void vp9_encode_inter16x16(VP9_COMMON *const cm, MACROBLOCK *x,
 void vp9_encode_inter16x16y(MACROBLOCK *x, int mb_row, int mb_col);
 
 void vp9_transform_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sby_32x32(VP9_COMMON *const cm, MACROBLOCK *x,
-                            BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sby_16x16(VP9_COMMON *const cm, MACROBLOCK *x,
-                            BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sby_8x8(VP9_COMMON *const cm, MACROBLOCK *x,
-                          BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sby_4x4(VP9_COMMON *const cm, MACROBLOCK *x,
-                          BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sbuv_32x32(VP9_COMMON *const cm, MACROBLOCK *x,
-                             BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sbuv_16x16(VP9_COMMON *const cm, MACROBLOCK *x,
-                             BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sbuv_8x8(VP9_COMMON *const cm, MACROBLOCK *x,
-                           BLOCK_SIZE_TYPE bsize);
 void vp9_transform_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
-void vp9_optimize_sbuv_4x4(VP9_COMMON *const cm, MACROBLOCK *x,
-                           BLOCK_SIZE_TYPE bsize);
+
+void vp9_optimize_sby(VP9_COMMON *const cm, MACROBLOCK *x,
+                      BLOCK_SIZE_TYPE bsize);
+void vp9_optimize_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
+                       BLOCK_SIZE_TYPE bsize);
 
 #if !CONFIG_SB8X8
 void vp9_fidct_mb(VP9_COMMON *const cm, MACROBLOCK *x);
......