Commit f1e62932 authored by Ronald S. Bultje

Add a _4x4 suffix to all 4x4-transform-related functions.

This includes trellis optimization, forward/inverse transform,
quantization, tokenization and stuffing functions.

Change-Id: Ibd34132e1bf0cd667671a57b3f25b3d361b9bf8a
parent e03715fe
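
Editor's note: for orientation only, below is a minimal sketch (not part of this commit) of the per-transform-size dispatch pattern that the new naming makes explicit. The helper name inverse_transform_mby_dispatch and its tx_type parameter are hypothetical; the _4x4/_8x8/_16x16 variants and their (rtcd, xd) arguments are taken from the functions renamed in the diff below.

/* Illustrative sketch only; the dispatch helper is hypothetical. */
static void inverse_transform_mby_dispatch(const vp8_idct_rtcd_vtable_t *rtcd,
                                           MACROBLOCKD *xd, int tx_type) {
  if (tx_type == TX_16X16)
    vp8_inverse_transform_mby_16x16(rtcd, xd);
  else if (tx_type == TX_8X8)
    vp8_inverse_transform_mby_8x8(rtcd, xd);
  else
    vp8_inverse_transform_mby_4x4(rtcd, xd);  /* was vp8_inverse_transform_mby() */
}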
@@ -31,7 +31,7 @@ static void recon_dcblock_8x8(MACROBLOCKD *xd) {
 }
-void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
+void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch) {
   if (b->eob <= 1)
     IDCT_INVOKE(rtcd, idct1)(b->dqcoeff, b->diff, pitch);
   else
@@ -39,8 +39,8 @@ void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int
 }
-void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
+void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
                                MACROBLOCKD *xd) {
   int i;
   BLOCKD *blockd = xd->block;
@@ -50,24 +50,24 @@ void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd,
     recon_dcblock(xd);
   for (i = 0; i < 16; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 32);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
   }
 }
-void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd,
+void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
                                 MACROBLOCKD *xd) {
   int i;
   BLOCKD *blockd = xd->block;
   for (i = 16; i < 24; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 16);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
   }
 }
-void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
+void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd,
                               MACROBLOCKD *xd) {
   int i;
   BLOCKD *blockd = xd->block;
@@ -81,12 +81,12 @@ void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd,
   }
   for (i = 0; i < 16; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 32);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 32);
   }
   for (i = 16; i < 24; i++) {
-    vp8_inverse_transform_b(rtcd, &blockd[i], 16);
+    vp8_inverse_transform_b_4x4(rtcd, &blockd[i], 16);
   }
 }
...
@@ -16,10 +16,10 @@
 #include "idct.h"
 #include "blockd.h"
-extern void vp8_inverse_transform_b(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
-extern void vp8_inverse_transform_mb(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mby(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
-extern void vp8_inverse_transform_mbuv(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_b_4x4(const vp8_idct_rtcd_vtable_t *rtcd, BLOCKD *b, int pitch);
+extern void vp8_inverse_transform_mb_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mby_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
+extern void vp8_inverse_transform_mbuv_4x4(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
 extern void vp8_inverse_transform_b_8x8(const vp8_idct_rtcd_vtable_t *rtcd, short *input_dqcoeff, short *output_coeff, int pitch);
 extern void vp8_inverse_transform_mb_8x8(const vp8_idct_rtcd_vtable_t *rtcd, MACROBLOCKD *xd);
...
@@ -183,8 +183,8 @@ typedef struct {
   void (*vp8_short_fdct4x4)(short *input, short *output, int pitch);
   void (*vp8_short_fdct8x4)(short *input, short *output, int pitch);
   void (*short_walsh4x4)(short *input, short *output, int pitch);
-  void (*quantize_b)(BLOCK *b, BLOCKD *d);
-  void (*quantize_b_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
+  void (*quantize_b_4x4)(BLOCK *b, BLOCKD *d);
+  void (*quantize_b_4x4_pair)(BLOCK *b1, BLOCK *b2, BLOCKD *d0, BLOCKD *d1);
   void (*vp8_short_fdct8x8)(short *input, short *output, int pitch);
   void (*vp8_short_fdct16x16)(short *input, short *output, int pitch);
   void (*short_fhaar2x2)(short *input, short *output, int pitch);
...
@@ -52,8 +52,8 @@ int enc_debug = 0;
 int mb_row_debug, mb_col_debug;
 #endif
-extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *xd,
+extern void vp8_stuff_mb_4x4(VP8_COMP *cpi, MACROBLOCKD *xd,
                          TOKENEXTRA **t, int dry_run);
 extern void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex);
 extern void vp8_auto_select_speed(VP8_COMP *cpi);
@@ -2153,7 +2153,7 @@ void vp8cx_encode_inter_macroblock (VP8_COMP *cpi, MACROBLOCK *x,
       cpi->skip_true_count[mb_skip_context]++;
     vp8_fix_contexts(xd);
   } else {
-    vp8_stuff_mb(cpi, xd, t, !output_enabled);
+    vp8_stuff_mb_4x4(cpi, xd, t, !output_enabled);
     mbmi->mb_skip_coeff = 0;
     if (output_enabled)
       cpi->skip_false_count[mb_skip_context]++;
@@ -2352,7 +2352,7 @@ void vp8cx_encode_inter_superblock(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
     cpi->skip_true_count[mb_skip_context]++;
     vp8_fix_contexts(xd);
   } else {
-    vp8_stuff_mb(cpi, xd, t, 0);
+    vp8_stuff_mb_4x4(cpi, xd, t, 0);
     xd->mode_info_context->mbmi.mb_skip_coeff = 0;
     cpi->skip_false_count[mb_skip_context]++;
   }
...
@@ -28,10 +28,6 @@
 #define IF_RTCD(x) NULL
 #endif
-#if CONFIG_HYBRIDTRANSFORM
-extern void vp8_ht_quantize_b(BLOCK *b, BLOCKD *d);
-#endif
 int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_16x16_pred) {
   int i;
   int intra_pred_var = 0;
@@ -89,17 +85,17 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
     b->bmi.as_mode.test = b->bmi.as_mode.first;
     txfm_map(b, b->bmi.as_mode.first);
     vp8_fht_c(be->src_diff, be->coeff, 32, b->bmi.as_mode.tx_type, 4);
-    vp8_ht_quantize_b(be, b);
+    vp8_ht_quantize_b_4x4(be, b);
     vp8_ihtllm_c(b->dqcoeff, b->diff, 32, b->bmi.as_mode.tx_type, 4);
   } else {
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32) ;
-    x->quantize_b(be, b) ;
-    vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32) ;
+    x->quantize_b_4x4(be, b) ;
+    vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32) ;
   }
 #else
   x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-  x->quantize_b(be, b);
-  vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+  x->quantize_b_4x4(be, b);
+  vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
 #endif
   RECON_INVOKE(&rtcd->common->recon, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
@@ -156,14 +152,14 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_transform_intra_mby_8x8(x);
   else
-    vp8_transform_intra_mby(x);
+    vp8_transform_intra_mby_4x4(x);
   if (tx_type == TX_16X16)
     vp8_quantize_mby_16x16(x);
   else if (tx_type == TX_8X8)
     vp8_quantize_mby_8x8(x);
   else
-    vp8_quantize_mby(x);
+    vp8_quantize_mby_4x4(x);
   if (x->optimize) {
     if (tx_type == TX_16X16)
@@ -171,7 +167,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
     else if (tx_type == TX_8X8)
       vp8_optimize_mby_8x8(x, rtcd);
     else
-      vp8_optimize_mby(x, rtcd);
+      vp8_optimize_mby_4x4(x, rtcd);
   }
   if (tx_type == TX_16X16)
@@ -190,7 +186,7 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   RECON_INVOKE(&rtcd->common->recon, recon_mby)
       (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
@@ -214,24 +210,24 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   if (tx_type == TX_8X8)
     vp8_transform_mbuv_8x8(x);
   else
-    vp8_transform_mbuv(x);
+    vp8_transform_mbuv_4x4(x);
   if (tx_type == TX_8X8)
     vp8_quantize_mbuv_8x8(x);
   else
-    vp8_quantize_mbuv(x);
+    vp8_quantize_mbuv_4x4(x);
   if (x->optimize) {
     if (tx_type == TX_8X8)
       vp8_optimize_mbuv_8x8(x, rtcd);
     else
-      vp8_optimize_mbuv(x, rtcd);
+      vp8_optimize_mbuv_4x4(x, rtcd);
   }
   if (tx_type == TX_8X8)
     vp8_inverse_transform_mbuv_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mbuv(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mbuv_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   vp8_recon_intra_mbuv(IF_RTCD(&rtcd->common->recon), &x->e_mbd);
 }
@@ -280,8 +276,8 @@ void vp8_encode_intra8x8(const VP8_ENCODER_RTCD *rtcd,
     be = &x->block[ib + iblock[i]];
     ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
     x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
-    x->quantize_b(be, b);
-    vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 32);
+    x->quantize_b_4x4(be, b);
+    vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 32);
   }
 }
@@ -324,9 +320,9 @@ void vp8_encode_intra_uv4x4(const VP8_ENCODER_RTCD *rtcd,
   x->vp8_short_fdct4x4(be->src_diff, be->coeff, 16);
-  x->quantize_b(be, b);
-  vp8_inverse_transform_b(IF_RTCD(&rtcd->common->idct), b, 16);
+  x->quantize_b_4x4(be, b);
+  vp8_inverse_transform_b_4x4(IF_RTCD(&rtcd->common->idct), b, 16);
   RECON_INVOKE(&rtcd->common->recon, recon_uv)(b->predictor,
       b->diff, *(b->base_dst) + b->dst, b->dst_stride);
...
@@ -132,7 +132,7 @@ static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   ENCODEMB_INVOKE(&rtcd->encodemb, submbuv)(x->src_diff, x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor, x->src.uv_stride);
 }
-static void build_dcblock(MACROBLOCK *x) {
+static void build_dcblock_4x4(MACROBLOCK *x) {
   short *src_diff_ptr = &x->src_diff[384];
   int i;
@@ -140,6 +140,7 @@ static void build_dcblock(MACROBLOCK *x) {
     src_diff_ptr[i] = x->coeff[i * 16];
   }
 }
 void vp8_build_dcblock_8x8(MACROBLOCK *x) {
   short *src_diff_ptr = &x->src_diff[384];
   int i;
@@ -152,7 +153,7 @@ void vp8_build_dcblock_8x8(MACROBLOCK *x) {
   src_diff_ptr[8] = x->coeff[12 * 16];
 }
-void vp8_transform_mbuv(MACROBLOCK *x) {
+void vp8_transform_mbuv_4x4(MACROBLOCK *x) {
   int i;
   for (i = 16; i < 24; i += 2) {
@@ -162,7 +163,7 @@ void vp8_transform_mbuv(MACROBLOCK *x) {
 }
-void vp8_transform_intra_mby(MACROBLOCK *x) {
+void vp8_transform_intra_mby_4x4(MACROBLOCK *x) {
   int i;
   for (i = 0; i < 16; i += 2) {
@@ -171,7 +172,7 @@ void vp8_transform_intra_mby(MACROBLOCK *x) {
   }
   // build dc block from 16 y dc values
-  build_dcblock(x);
+  build_dcblock_4x4(x);
   // do 2nd order transform on the dc block
   x->short_walsh4x4(&x->block[24].src_diff[0],
@@ -180,7 +181,7 @@ void vp8_transform_intra_mby(MACROBLOCK *x) {
 }
-static void transform_mb(MACROBLOCK *x) {
+static void transform_mb_4x4(MACROBLOCK *x) {
   int i;
   MB_PREDICTION_MODE mode = x->e_mbd.mode_info_context->mbmi.mode;
@@ -191,7 +192,7 @@ static void transform_mb(MACROBLOCK *x) {
   // build dc block from 16 y dc values
   if (mode != SPLITMV)
-    build_dcblock(x);
+    build_dcblock_4x4(x);
   for (i = 16; i < 24; i += 2) {
     x->vp8_short_fdct8x4(&x->block[i].src_diff[0],
@@ -206,7 +207,7 @@ static void transform_mb(MACROBLOCK *x) {
 }
-static void transform_mby(MACROBLOCK *x) {
+static void transform_mby_4x4(MACROBLOCK *x) {
   int i;
   for (i = 0; i < 16; i += 2) {
@@ -216,7 +217,7 @@ static void transform_mby(MACROBLOCK *x) {
   // build dc block from 16 y dc values
   if (x->e_mbd.mode_info_context->mbmi.mode != SPLITMV) {
-    build_dcblock(x);
+    build_dcblock_4x4(x);
     x->short_walsh4x4(&x->block[24].src_diff[0],
                       &x->block[24].coeff[0], 8);
   }
@@ -676,7 +677,7 @@ static void check_reset_8x8_2nd_coeffs(MACROBLOCKD *xd, int type,
   }
 }
-static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+static void optimize_mb_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   int b;
   int type;
   int has_2nd_order;
@@ -714,7 +715,7 @@ static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
 }
-void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mby_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   int b;
   int type;
   int has_2nd_order;
@@ -754,7 +755,7 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   }
 }
-void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
+void vp8_optimize_mbuv_4x4(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd) {
   int b;
   ENTROPY_CONTEXT_PLANES t_above, t_left;
   ENTROPY_CONTEXT *ta;
@@ -1106,14 +1107,14 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_transform_mb_8x8(x);
   else
-    transform_mb(x);
+    transform_mb_4x4(x);
   if (tx_type == TX_16X16)
     vp8_quantize_mb_16x16(x);
   else if (tx_type == TX_8X8)
     vp8_quantize_mb_8x8(x);
   else
-    vp8_quantize_mb(x);
+    vp8_quantize_mb_4x4(x);
   if (x->optimize) {
     if (tx_type == TX_16X16)
@@ -1121,7 +1122,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
     else if (tx_type == TX_8X8)
      optimize_mb_8x8(x, rtcd);
     else
-      optimize_mb(x, rtcd);
+      optimize_mb_4x4(x, rtcd);
   }
   if (tx_type == TX_16X16)
@@ -1130,7 +1131,7 @@ void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   if (tx_type == TX_8X8)
     vp8_inverse_transform_mb_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mb(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mb_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   if (tx_type == TX_8X8) {
 #ifdef ENC_DEBUG
@@ -1204,9 +1205,9 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   else if (tx_type == TX_8X8)
     vp8_transform_mby_8x8(x);
   else
-    transform_mby(x);
-  vp8_quantize_mby(x);
+    transform_mby_4x4(x);
+  vp8_quantize_mby_4x4(x);
   if (tx_type == TX_16X16)
     vp8_inverse_transform_mby_16x16(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
@@ -1214,7 +1215,7 @@ void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x) {
   if (tx_type == TX_8X8)
     vp8_inverse_transform_mby_8x8(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   else
-    vp8_inverse_transform_mby(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
+    vp8_inverse_transform_mby_4x4(IF_RTCD(&rtcd->common->idct), &x->e_mbd);
   RECON_INVOKE(&rtcd->common->recon, recon_mby)
       (IF_RTCD(&rtcd->common->recon), &x->e_mbd);
...
@@ -105,12 +105,12 @@ struct VP8_ENCODER_RTCD;
 void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
 void vp8_build_dcblock(MACROBLOCK *b);
-void vp8_transform_mb(MACROBLOCK *mb);
-void vp8_transform_mbuv(MACROBLOCK *x);
+void vp8_transform_mb_4x4(MACROBLOCK *mb);
+void vp8_transform_mbuv_4x4(MACROBLOCK *x);