Commit 4529c68b authored by John Koleszar

Separate transform and quant from vp9_encode_sb

This allows removing a large number of transform-size-specific functions,
as well as supporting 444/alpha by routing all code through the
subsampling-aware path.

Change-Id: Ieb085cebe9f37f24fc24de179898b22abfda08a4
parent 3f4e8063
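
In outline, the patch replaces the per-transform-size entry points
(vp9_transform_sby_NxN, vp9_quantize_sby_NxN, vp9_inverse_transform_sby_NxN,
and their uv twins) with two block visitors: xform_quant(), which does only
the forward transform and quantization, and encode_block(), which calls
xform_quant() and then layers the optional trellis optimization and the
inverse transform on top. Both are driven by the foreach_transformed_block*
iterators, which already account for each plane's subsampling. Below is a
minimal, self-contained sketch of that pattern, not code from the patch:
aside from the xform_quant/encode_block split, every name here
(foreach_block, struct plane, the printf bodies) is a hypothetical stand-in.

#include <stdio.h>

typedef void (*block_visitor)(int plane, int block, void *arg);

struct plane { int subsampling_x, subsampling_y; };

/* Stand-in for foreach_transformed_block_in_plane(): the number of blocks
 * visited shrinks with the plane's subsampling, so 4:2:0, 4:4:4 and alpha
 * planes all route through the same loop. */
static void foreach_block(const struct plane *p, int b_width_log2,
                          int b_height_log2, int plane_idx,
                          block_visitor visit, void *arg) {
  const int bw = 1 << (b_width_log2 - p->subsampling_x);
  const int bh = 1 << (b_height_log2 - p->subsampling_y);
  int block;
  for (block = 0; block < bw * bh; block++)
    visit(plane_idx, block, arg);
}

/* Forward transform + quantize only -- the piece that vp9_xform_quant_sby()
 * and vp9_xform_quant_sbuv() need on their own. */
static void xform_quant(int plane, int block, void *arg) {
  printf("fwd txfm + quant: plane %d block %d\n", plane, block);
}

/* The full encode path reuses xform_quant() instead of duplicating the
 * transform/quant logic once per transform size. */
static void encode_block(int plane, int block, void *arg) {
  xform_quant(plane, block, arg);
  printf("optimize + inv txfm: plane %d block %d\n", plane, block);
}

int main(void) {
  struct plane planes[3] = {{0, 0}, {1, 1}, {1, 1}};  /* 4:2:0 layout */
  int i;
  for (i = 0; i < 3; i++)  /* y, u, v: one path, subsampling-aware */
    foreach_block(&planes[i], 2, 2, i, encode_block, NULL);
  return 0;
}
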
@@ -19,153 +19,3 @@ void vp9_inverse_transform_b_4x4(MACROBLOCKD *xd, int eob,
else
xd->inv_txm4x4(dqcoeff, diff, pitch);
}
void vp9_inverse_transform_b_8x8(int16_t *input_dqcoeff, int16_t *output_coeff,
int pitch) {
vp9_short_idct8x8(input_dqcoeff, output_coeff, pitch);
}
void vp9_inverse_transform_b_16x16(int16_t *input_dqcoeff,
int16_t *output_coeff, int pitch) {
vp9_short_idct16x16(input_dqcoeff, output_coeff, pitch);
}
void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 3, bw = 1 << bwl;
const int bh = 1 << (b_height_log2(bsize) - 3);
const int stride = 32 << bwl;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const int offset = x_idx * 32 + y_idx * 32 * stride;
vp9_short_idct32x32(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 1024),
xd->plane[0].diff + offset, stride * 2);
}
}
void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 2, bw = 1 << bwl;
const int bh = 1 << (b_height_log2(bsize) - 2);
const int stride = 16 << bwl, bstride = 4 << bwl;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_16x16(xd,
(y_idx * bstride + x_idx) * 4);
const int offset = x_idx * 16 + y_idx * 16 * stride;
if (tx_type == DCT_DCT) {
vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 256),
xd->plane[0].diff + offset, stride * 2);
} else {
vp9_short_iht16x16(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 256),
xd->plane[0].diff + offset, stride, tx_type);
}
}
}
void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 1, bw = 1 << bwl;
const int bh = 1 << (b_height_log2(bsize) - 1);
const int stride = 8 << bwl, bstride = 2 << bwl;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_8x8(xd, (y_idx * bstride + x_idx) * 2);
const int offset = x_idx * 8 + y_idx * 8 * stride;
if (tx_type == DCT_DCT) {
vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 64),
xd->plane[0].diff + offset, stride * 2);
} else {
vp9_short_iht8x8(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 64),
xd->plane[0].diff + offset, stride, tx_type);
}
}
}
void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize), bw = 1 << bwl;
const int bh = 1 << b_height_log2(bsize);
const int stride = 4 << bwl, bstride = 1 << bwl;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_4x4(xd, y_idx * bstride + x_idx);
const int offset = x_idx * 4 + y_idx * 4 * stride;
if (tx_type == DCT_DCT) {
vp9_inverse_transform_b_4x4(xd, xd->plane[0].eobs[n],
BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
xd->plane[0].diff + offset, stride * 2);
} else {
vp9_short_iht4x4(BLOCK_OFFSET(xd->plane[0].dqcoeff, n, 16),
xd->plane[0].diff + offset, stride, tx_type);
}
}
}
void vp9_inverse_transform_sbuv_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
assert(bsize == BLOCK_SIZE_SB64X64);
vp9_short_idct32x32(xd->plane[1].dqcoeff, xd->plane[1].diff, 64);
vp9_short_idct32x32(xd->plane[2].dqcoeff, xd->plane[2].diff, 64);
}
void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 2, bhl = b_height_log2(bsize) - 2;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 16 << (bwl - 1);
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
const int off = x_idx * 16 + y_idx * stride * 16;
vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 256),
xd->plane[1].diff + off, stride * 2);
vp9_inverse_transform_b_16x16(BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 256),
xd->plane[2].diff + off, stride * 2);
}
}
void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 1, bhl = b_height_log2(bsize) - 1;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 8 << (bwl - 1);
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
const int off = x_idx * 8 + y_idx * stride * 8;
vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 64),
xd->plane[1].diff + off, stride * 2);
vp9_inverse_transform_b_8x8(BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 64),
xd->plane[2].diff + off, stride * 2);
}
}
void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 4 << (bwl - 1);
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
const int off = x_idx * 4 + y_idx * stride * 4;
vp9_inverse_transform_b_4x4(xd, xd->plane[1].eobs[n],
BLOCK_OFFSET(xd->plane[1].dqcoeff, n, 16),
xd->plane[1].diff + off, stride * 2);
vp9_inverse_transform_b_4x4(xd, xd->plane[2].eobs[n],
BLOCK_OFFSET(xd->plane[2].dqcoeff, n, 16),
xd->plane[2].diff + off, stride * 2);
}
}
@@ -18,20 +18,4 @@
void vp9_inverse_transform_b_4x4(MACROBLOCKD *xd, int eob,
int16_t *dqcoeff, int16_t *diff,
int pitch);
void vp9_inverse_transform_b_8x8(int16_t *input_dqcoeff,
int16_t *output_coeff, int pitch);
void vp9_inverse_transform_b_16x16(int16_t *input_dqcoeff,
int16_t *output_coeff, int pitch);
void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
#endif // VP9_COMMON_VP9_INVTRANS_H_
@@ -164,12 +164,12 @@ struct macroblock {
void (*fwd_txm16x16)(int16_t *input, int16_t *output, int pitch);
void (*quantize_b_4x4)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
int y_blocks);
#if !CONFIG_SB8X8
void (*quantize_b_4x4_pair)(MACROBLOCK *x, int b_idx1, int b_idx2,
int y_blocks);
void (*quantize_b_16x16)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
int y_blocks);
void (*quantize_b_8x8)(MACROBLOCK *x, int b_idx, TX_TYPE tx_type,
int y_blocks);
#endif
};
#endif // VP9_ENCODER_VP9_BLOCK_H_
@@ -2435,13 +2435,7 @@ static void encode_superblock(VP9_COMP *cpi, TOKENEXTRA **t,
vp9_encode_intra4x4mby(x, bsize);
vp9_build_intra_predictors_sbuv_s(&x->e_mbd, bsize);
vp9_subtract_sbuv(x, bsize);
vp9_transform_sbuv_4x4(x, bsize);
vp9_quantize_sbuv_4x4(x, bsize);
if (x->optimize)
vp9_optimize_sbuv(cm, x, bsize);
vp9_inverse_transform_sbuv_4x4(xd, bsize);
vp9_recon_sbuv(xd, bsize);
vp9_encode_sbuv(cm, x, bsize);
if (output_enabled)
sum_intra_stats(cpi, x);
@@ -104,63 +104,16 @@ void vp9_encode_intra4x4mby(MACROBLOCK *mb, BLOCK_SIZE_TYPE bsize) {
void vp9_encode_intra16x16mby(VP9_COMMON *const cm, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
vp9_build_intra_predictors_sby_s(xd, BLOCK_SIZE_MB16X16);
vp9_subtract_sby(x, BLOCK_SIZE_MB16X16);
switch (tx_size) {
case TX_16X16:
vp9_transform_sby_16x16(x, BLOCK_SIZE_MB16X16);
vp9_quantize_sby_16x16(x, BLOCK_SIZE_MB16X16);
if (x->optimize)
vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
vp9_inverse_transform_sby_16x16(xd, BLOCK_SIZE_MB16X16);
break;
case TX_8X8:
vp9_transform_sby_8x8(x, BLOCK_SIZE_MB16X16);
vp9_quantize_sby_8x8(x, BLOCK_SIZE_MB16X16);
if (x->optimize)
vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
vp9_inverse_transform_sby_8x8(xd, BLOCK_SIZE_MB16X16);
break;
default:
vp9_transform_sby_4x4(x, BLOCK_SIZE_MB16X16);
vp9_quantize_sby_4x4(x, BLOCK_SIZE_MB16X16);
if (x->optimize)
vp9_optimize_sby(cm, x, BLOCK_SIZE_MB16X16);
vp9_inverse_transform_sby_4x4(xd, BLOCK_SIZE_MB16X16);
break;
}
vp9_recon_sby(xd, BLOCK_SIZE_MB16X16);
vp9_encode_sby(cm, x, BLOCK_SIZE_MB16X16);
}
void vp9_encode_intra16x16mbuv(VP9_COMMON *const cm, MACROBLOCK *x) {
MACROBLOCKD *xd = &x->e_mbd;
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
vp9_build_intra_predictors_sbuv_s(xd, BLOCK_SIZE_MB16X16);
vp9_subtract_sbuv(x, BLOCK_SIZE_MB16X16);
switch (tx_size) {
case TX_4X4:
vp9_transform_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
vp9_quantize_sbuv_4x4(x, BLOCK_SIZE_MB16X16);
if (x->optimize)
vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
vp9_inverse_transform_sbuv_4x4(xd, BLOCK_SIZE_MB16X16);
break;
default: // 16x16 or 8x8
vp9_transform_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
vp9_quantize_sbuv_8x8(x, BLOCK_SIZE_MB16X16);
if (x->optimize)
vp9_optimize_sbuv(cm, x, BLOCK_SIZE_MB16X16);
vp9_inverse_transform_sbuv_8x8(xd, BLOCK_SIZE_MB16X16);
break;
}
vp9_recon_sbuv(xd, BLOCK_SIZE_MB16X16);
vp9_encode_sbuv(cm, x, BLOCK_SIZE_MB16X16);
}
#if !CONFIG_SB8X8
@@ -67,143 +67,6 @@ void vp9_subtract_sb(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
}
void vp9_transform_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 3, bw = 1 << bwl;
const int bh = 1 << (b_height_log2(bsize) - 3);
const int stride = 32 << bwl;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
vp9_short_fdct32x32(x->plane[0].src_diff + y_idx * stride * 32 + x_idx * 32,
x->plane[0].coeff + n * 1024, stride * 2);
}
}
void vp9_transform_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 2, bw = 1 << bwl;
const int bh = 1 << (b_height_log2(bsize) - 2);
const int stride = 16 << bwl, bstride = 4 << bwl;
MACROBLOCKD *const xd = &x->e_mbd;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_16x16(xd,
(y_idx * bstride + x_idx) * 4);
if (tx_type != DCT_DCT) {
vp9_short_fht16x16(x->plane[0].src_diff +
y_idx * stride * 16 + x_idx * 16,
x->plane[0].coeff + n * 256, stride, tx_type);
} else {
x->fwd_txm16x16(x->plane[0].src_diff + y_idx * stride * 16 + x_idx * 16,
x->plane[0].coeff + n * 256, stride * 2);
}
}
}
void vp9_transform_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 1, bw = 1 << bwl;
const int bh = 1 << (b_height_log2(bsize) - 1);
const int stride = 8 << bwl, bstride = 2 << bwl;
MACROBLOCKD *const xd = &x->e_mbd;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_8x8(xd, (y_idx * bstride + x_idx) * 2);
if (tx_type != DCT_DCT) {
vp9_short_fht8x8(x->plane[0].src_diff + y_idx * stride * 8 + x_idx * 8,
x->plane[0].coeff + n * 64, stride, tx_type);
} else {
x->fwd_txm8x8(x->plane[0].src_diff + y_idx * stride * 8 + x_idx * 8,
x->plane[0].coeff + n * 64, stride * 2);
}
}
}
void vp9_transform_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize), bw = 1 << bwl;
const int bh = 1 << b_height_log2(bsize);
const int stride = 4 << bwl;
MACROBLOCKD *const xd = &x->e_mbd;
int n;
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
if (tx_type != DCT_DCT) {
vp9_short_fht4x4(x->plane[0].src_diff + y_idx * stride * 4 + x_idx * 4,
x->plane[0].coeff + n * 16, stride, tx_type);
} else {
x->fwd_txm4x4(x->plane[0].src_diff + y_idx * stride * 4 + x_idx * 4,
x->plane[0].coeff + n * 16, stride * 2);
}
}
}
void vp9_transform_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
assert(bsize == BLOCK_SIZE_SB64X64);
vp9_clear_system_state();
vp9_short_fdct32x32(x->plane[1].src_diff, x->plane[1].coeff, 64);
vp9_short_fdct32x32(x->plane[2].src_diff, x->plane[2].coeff, 64);
}
void vp9_transform_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 2, bhl = b_height_log2(bsize) - 2;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 16 << (bwl - 1);
int n;
vp9_clear_system_state();
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
x->fwd_txm16x16(x->plane[1].src_diff + y_idx * stride * 16 + x_idx * 16,
x->plane[1].coeff + n * 256, stride * 2);
x->fwd_txm16x16(x->plane[2].src_diff + y_idx * stride * 16 + x_idx * 16,
x->plane[2].coeff + n * 256, stride * 2);
}
}
void vp9_transform_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize) - 1, bhl = b_height_log2(bsize) - 1;
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 8 << (bwl - 1);
int n;
vp9_clear_system_state();
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
x->fwd_txm8x8(x->plane[1].src_diff + y_idx * stride * 8 + x_idx * 8,
x->plane[1].coeff + n * 64, stride * 2);
x->fwd_txm8x8(x->plane[2].src_diff + y_idx * stride * 8 + x_idx * 8,
x->plane[2].coeff + n * 64, stride * 2);
}
}
void vp9_transform_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = b_width_log2(bsize), bhl = b_height_log2(bsize);
const int bw = 1 << (bwl - 1), bh = 1 << (bhl - 1);
const int stride = 4 << (bwl - 1);
int n;
vp9_clear_system_state();
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> (bwl - 1);
x->fwd_txm4x4(x->plane[1].src_diff + y_idx * stride * 4 + x_idx * 4,
x->plane[1].coeff + n * 16, stride * 2);
x->fwd_txm4x4(x->plane[2].src_diff + y_idx * stride * 4 + x_idx * 4,
x->plane[2].coeff + n * 16, stride * 2);
}
}
#define RDTRUNC(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
#define RDTRUNC_8x8(RM,DM,R,D) ( (128+(R)*(RM)) & 0xFF )
typedef struct vp9_token_state vp9_token_state;
@@ -561,7 +424,7 @@ struct encode_b_args {
struct optimize_ctx *ctx;
};
static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
static void xform_quant(int plane, int block, BLOCK_SIZE_TYPE bsize,
int ss_txfrm_size, void *arg) {
struct encode_b_args* const args = arg;
MACROBLOCK* const x = args->x;
@@ -572,9 +435,6 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
int16_t* const src_diff = raster_block_offset_int16(xd, bsize, plane,
raster_block,
x->plane[plane].src_diff);
int16_t* const diff = raster_block_offset_int16(xd, bsize, plane,
raster_block,
xd->plane[plane].diff);
TX_TYPE tx_type = DCT_DCT;
switch (ss_txfrm_size / 2) {
@@ -624,6 +484,23 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
}
vp9_quantize(x, plane, block, 16 << ss_txfrm_size, tx_type);
}
static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
int ss_txfrm_size, void *arg) {
struct encode_b_args* const args = arg;
MACROBLOCK* const x = args->x;
MACROBLOCKD* const xd = &x->e_mbd;
const int bw = 4 << (b_width_log2(bsize) - xd->plane[plane].subsampling_x);
const int raster_block = txfrm_block_to_raster_block(xd, bsize, plane,
block, ss_txfrm_size);
int16_t* const diff = raster_block_offset_int16(xd, bsize, plane,
raster_block,
xd->plane[plane].diff);
TX_TYPE tx_type = DCT_DCT;
xform_quant(plane, block, bsize, ss_txfrm_size, arg);
if (x->optimize)
vp9_optimize_b(plane, block, bsize, ss_txfrm_size, args->cm, x, args->ctx);
@@ -633,6 +510,7 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
diff, bw * 2);
break;
case TX_16X16:
tx_type = plane == 0 ? get_tx_type_16x16(xd, raster_block) : DCT_DCT;
if (tx_type == DCT_DCT) {
vp9_short_idct16x16(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
diff, bw * 2);
@@ -642,6 +520,7 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
}
break;
case TX_8X8:
tx_type = plane == 0 ? get_tx_type_8x8(xd, raster_block) : DCT_DCT;
if (tx_type == DCT_DCT) {
vp9_short_idct8x8(BLOCK_OFFSET(xd->plane[plane].dqcoeff, block, 16),
diff, bw * 2);
@@ -651,6 +530,7 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
}
break;
case TX_4X4:
tx_type = plane == 0 ? get_tx_type_4x4(xd, raster_block) : DCT_DCT;
if (tx_type == DCT_DCT) {
// this is like vp9_short_idct4x4 but has a special case around eob<=1
// which is significant (not just an optimization) for the lossless
@@ -665,6 +545,60 @@ static void encode_block(int plane, int block, BLOCK_SIZE_TYPE bsize,
}
}
void vp9_xform_quant_sby(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD* const xd = &x->e_mbd;
struct encode_b_args arg = {cm, x, NULL};
foreach_transformed_block_in_plane(xd, bsize, 0,
#if !CONFIG_SB8X8
0,
#endif
xform_quant, &arg);
}
void vp9_xform_quant_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD* const xd = &x->e_mbd;
struct encode_b_args arg = {cm, x, NULL};
foreach_transformed_block_uv(xd, bsize, xform_quant, &arg);
}
void vp9_encode_sby(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD* const xd = &x->e_mbd;
struct optimize_ctx ctx;
struct encode_b_args arg = {cm, x, &ctx};
vp9_subtract_sby(x, bsize);
if (x->optimize)
vp9_optimize_init(xd, bsize, &ctx);
foreach_transformed_block_in_plane(xd, bsize, 0,
#if !CONFIG_SB8X8
0,
#endif
encode_block, &arg);
vp9_recon_sby(xd, bsize);
}
void vp9_encode_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD* const xd = &x->e_mbd;
struct optimize_ctx ctx;
struct encode_b_args arg = {cm, x, &ctx};
vp9_subtract_sbuv(x, bsize);
if (x->optimize)
vp9_optimize_init(xd, bsize, &ctx);
foreach_transformed_block_uv(xd, bsize, encode_block, &arg);
vp9_recon_sbuv(xd, bsize);
}
void vp9_encode_sb(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize) {
MACROBLOCKD* const xd = &x->e_mbd;
@@ -22,18 +22,6 @@ typedef struct {
MV_REFERENCE_FRAME second_ref_frame;
} MODE_DEFINITION;
#if !CONFIG_SB8X8
#endif
void vp9_transform_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
struct optimize_ctx {
ENTROPY_CONTEXT ta[MAX_MB_PLANE][16];
ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
@@ -49,6 +37,14 @@ void vp9_optimize_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_encode_sb(VP9_COMMON *const cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_encode_sby(VP9_COMMON *const cm, MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_encode_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_xform_quant_sby(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_xform_quant_sbuv(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_subtract_block(int rows, int cols,
int16_t *diff_ptr, int diff_stride,
@@ -867,9 +867,10 @@ void vp9_set_speed_features(VP9_COMP *cpi) {
}
cpi->mb.quantize_b_4x4 = vp9_regular_quantize_b_4x4;
#if !CONFIG_SB8X8
cpi->mb.quantize_b_4x4_pair = vp9_regular_quantize_b_4x4_pair;
cpi->mb.quantize_b_8x8 = vp9_regular_quantize_b_8x8;
cpi->mb.quantize_b_16x16 = vp9_regular_quantize_b_16x16;