Commit 5d4cffb3 authored by Ronald S. Bultje

Superblock coding.

This commit adds a pick_sb_mode() function that selects the best 32x32
superblock coding mode. It then selects the best per-MB modes for the same
area, compares the two, and encodes the winner in the bitstream.
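
A rough sketch of that comparison, for illustration only: the names below
(rd_cost_sketch, pick_sb_or_mb_sketch, lambda) are hypothetical and do not
appear in the patch; the real work happens inside pick_sb_mode() and the
existing per-MB mode search. The idea is simply to keep whichever of the two
codings has the lower rate-distortion cost.

    /* Hypothetical sketch of the SB-vs-MB decision described above. */
    #include <stdint.h>

    typedef struct {
      int rate;            /* bits needed to signal the mode(s) */
      int64_t distortion;  /* reconstruction error */
    } rd_cost_sketch;

    /* Standard rate-distortion cost: distortion plus lambda-weighted rate. */
    static int64_t rd_cost(const rd_cost_sketch *c, int64_t lambda) {
      return c->distortion + lambda * c->rate;
    }

    /* Returns 1 if one 32x32 superblock mode is cheaper than the four 16x16
     * macroblock modes covering the same area (i.e. set encoded_as_sb). */
    static int pick_sb_or_mb_sketch(const rd_cost_sketch *sb_mode,
                                    const rd_cost_sketch mb_modes[4],
                                    int64_t lambda) {
      int64_t mb_total = 0;
      int i;
      for (i = 0; i < 4; i++)
        mb_total += rd_cost(&mb_modes[i], lambda);
      return rd_cost(sb_mode, lambda) < mb_total;
    }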

The bitstream coding is rather simplistic right now. At the SB level, we
code one bit to indicate whether the block uses SB coding (a single 32x32
prediction) or MB coding (anything else), and then follow with the actual
modes. This could and should be refined in the future, but that work is
omitted from this commit because it would likely involve reorganizing much
more code than just adding SB coding, so it is better judged on its own
merits.
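
For illustration, the signalling described above amounts to one boolean-coder
symbol per 32x32 block ahead of the mode info. The sketch below is
hypothetical: write_bool() is a stand-in for the real VP8 boolean entropy
coder, and sb_coded_prob corresponds to the sb_coded probability added to
VP8Common later in this diff.

    /* Hypothetical sketch of the per-SB flag; write_bool() is a placeholder
     * for the real boolean (arithmetic) coder, not a libvpx API. */
    typedef struct bool_coder_sketch bool_coder_sketch;

    static void write_bool(bool_coder_sketch *bc, int bit, int prob) {
      (void)bc; (void)bit; (void)prob;  /* real coder codes 'bit' with 'prob' */
    }

    static void write_sb_header_sketch(bool_coder_sketch *bc,
                                       int encoded_as_sb, int sb_coded_prob) {
      /* One bit: SB coding (a single 32x32 prediction) vs. MB coding. */
      write_bool(bc, encoded_as_sb, sb_coded_prob);
      if (encoded_as_sb) {
        /* ...follow with a single 32x32 mode / motion vector set... */
      } else {
        /* ...follow with the usual per-MB modes for the four MBs... */
      }
    }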

Gains on derf: about even, YT/HD: +0.75%, STD/HD: +1.5%.

Change-Id: Iae313a7cbd8f75b3c66d04a68b991cb096eaaba6
parent 319dd1c0
@@ -148,6 +148,7 @@ typedef enum {
#define VP8_YMODES (B_PRED + 1)
#define VP8_UV_MODES (TM_PRED + 1)
#define VP8_I8X8_MODES (TM_PRED + 1)
#define VP8_I32X32_MODES (TM_PRED + 1)
#define VP8_MVREFS (1 + SPLITMV - NEARESTMV)
@@ -293,6 +294,11 @@ typedef struct {
INTERPOLATIONFILTERTYPE interp_filter;
#endif
#if CONFIG_SUPERBLOCKS
// FIXME need a SB array of 4 MB_MODE_INFOs that
// only needs one encoded_as_sb.
unsigned char encoded_as_sb;
#endif
} MB_MODE_INFO;
typedef struct {
@@ -227,6 +227,14 @@ const vp8_tree_index vp8_mv_ref_tree[8] = {
-NEWMV, -SPLITMV
};
#if CONFIG_SUPERBLOCKS
const vp8_tree_index vp8_sb_mv_ref_tree[6] = {
-ZEROMV, 2,
-NEARESTMV, 4,
-NEARMV, -NEWMV
};
#endif
const vp8_tree_index vp8_sub_mv_ref_tree[6] = {
-LEFT4X4, 2,
-ABOVE4X4, 4,
@@ -236,12 +244,18 @@ const vp8_tree_index vp8_sub_mv_ref_tree[6] = {
struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES];
struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES];
#if CONFIG_SUPERBLOCKS
struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES];
#endif
struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES];
struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES];
struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_UV_MODES];
struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES];
struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS];
struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
#if CONFIG_SUPERBLOCKS
struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
#endif
struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
@@ -253,11 +267,18 @@ void vp8_init_mbmode_probs(VP8_COMMON *x) {
vp8_ymode_tree, x->fc.ymode_prob, bct, y_mode_cts, 256, 1);
{
int i;
for (i = 0; i < 8; i++)
for (i = 0; i < 8; i++) {
vp8_tree_probs_from_distribution(
VP8_YMODES, vp8_kf_ymode_encodings, vp8_kf_ymode_tree,
x->kf_ymode_prob[i], bct, kf_y_mode_cts[i],
256, 1);
#if CONFIG_SUPERBLOCKS
vp8_tree_probs_from_distribution(
VP8_I32X32_MODES, vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree,
x->sb_kf_ymode_prob[i], bct, kf_y_mode_cts[i],
256, 1);
#endif
}
}
{
int i;
@@ -360,6 +381,9 @@ void vp8_entropy_mode_init() {
vp8_tokens_from_tree(vp8_bmode_encodings, vp8_bmode_tree);
vp8_tokens_from_tree(vp8_ymode_encodings, vp8_ymode_tree);
vp8_tokens_from_tree(vp8_kf_ymode_encodings, vp8_kf_ymode_tree);
#if CONFIG_SUPERBLOCKS
vp8_tokens_from_tree(vp8_sb_kf_ymode_encodings, vp8_sb_ymode_tree);
#endif
vp8_tokens_from_tree(vp8_uv_mode_encodings, vp8_uv_mode_tree);
vp8_tokens_from_tree(vp8_i8x8_mode_encodings, vp8_i8x8_mode_tree);
vp8_tokens_from_tree(vp8_mbsplit_encodings, vp8_mbsplit_tree);
@@ -370,6 +394,10 @@ void vp8_entropy_mode_init() {
vp8_tokens_from_tree_offset(vp8_mv_ref_encoding_array,
vp8_mv_ref_tree, NEARESTMV);
#if CONFIG_SUPERBLOCKS
vp8_tokens_from_tree_offset(vp8_sb_mv_ref_encoding_array,
vp8_sb_mv_ref_tree, NEARESTMV);
#endif
vp8_tokens_from_tree_offset(vp8_sub_mv_ref_encoding_array,
vp8_sub_mv_ref_tree, LEFT4X4);
}
@@ -40,21 +40,25 @@ extern const vp8_tree_index vp8_bmode_tree[];
extern const vp8_tree_index vp8_ymode_tree[];
extern const vp8_tree_index vp8_kf_ymode_tree[];
extern const vp8_tree_index vp8_uv_mode_tree[];
#define vp8_sb_ymode_tree vp8_uv_mode_tree
extern const vp8_tree_index vp8_i8x8_mode_tree[];
extern const vp8_tree_index vp8_mbsplit_tree[];
extern const vp8_tree_index vp8_mv_ref_tree[];
extern const vp8_tree_index vp8_sb_mv_ref_tree[];
extern const vp8_tree_index vp8_sub_mv_ref_tree[];
extern struct vp8_token_struct vp8_bmode_encodings [VP8_BINTRAMODES];
extern struct vp8_token_struct vp8_ymode_encodings [VP8_YMODES];
extern struct vp8_token_struct vp8_sb_kf_ymode_encodings [VP8_I32X32_MODES];
extern struct vp8_token_struct vp8_kf_ymode_encodings [VP8_YMODES];
extern struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_UV_MODES];
extern struct vp8_token_struct vp8_i8x8_mode_encodings [VP8_I8X8_MODES];
extern struct vp8_token_struct vp8_uv_mode_encodings [VP8_UV_MODES];
extern struct vp8_token_struct vp8_mbsplit_encodings [VP8_NUMMBSPLITS];
/* Inter mode values do not start at zero */
extern struct vp8_token_struct vp8_mv_ref_encoding_array [VP8_MVREFS];
extern struct vp8_token_struct vp8_sb_mv_ref_encoding_array [VP8_MVREFS];
extern struct vp8_token_struct vp8_sub_mv_ref_encoding_array [VP8_SUBMVREFS];
void vp8_entropy_mode_init(void);
@@ -47,6 +47,12 @@ void vp8_machine_specific_config(VP8_COMMON *ctx) {
rtcd->recon.recon4 = vp8_recon4b_c;
rtcd->recon.recon_mb = vp8_recon_mb_c;
rtcd->recon.recon_mby = vp8_recon_mby_c;
#if CONFIG_SUPERBLOCKS
rtcd->recon.build_intra_predictors_sby_s =
vp8_build_intra_predictors_sby_s;
rtcd->recon.build_intra_predictors_sbuv_s =
vp8_build_intra_predictors_sbuv_s;
#endif
rtcd->recon.build_intra_predictors_mby =
vp8_build_intra_predictors_mby;
#if CONFIG_COMP_INTRA_PRED
@@ -325,7 +325,13 @@ void vp8_loop_filter_frame
lfi.lim = lfi_n->lim[filter_level];
lfi.hev_thr = lfi_n->hev_thr[hev_index];
if (mb_col > 0)
if (mb_col > 0
#if CONFIG_SUPERBLOCKS
&& !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb &&
mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-1].mbmi.mb_skip_coeff)
#endif
)
vp8_loop_filter_mbv_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@@ -344,7 +350,13 @@ void vp8_loop_filter_frame
}
/* don't apply across umv border */
if (mb_row > 0)
if (mb_row > 0
#if CONFIG_SUPERBLOCKS
&& !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb &&
mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
#endif
)
vp8_loop_filter_mbh_c
(y_ptr, u_ptr, v_ptr, post->y_stride, post->uv_stride, &lfi);
@@ -362,7 +374,13 @@ void vp8_loop_filter_frame
}
} else {
// FIXME: Not 8x8 aware
if (mb_col > 0)
if (mb_col > 0
#if CONFIG_SUPERBLOCKS
&& !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb &&
mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-1].mbmi.mb_skip_coeff)
#endif
)
LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_v)
(y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
@@ -371,7 +389,13 @@ void vp8_loop_filter_frame
(y_ptr, post->y_stride, lfi_n->blim[filter_level]);
/* don't apply across umv border */
if (mb_row > 0)
if (mb_row > 0
#if CONFIG_SUPERBLOCKS
&& !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb &&
mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
#endif
)
LF_INVOKE(&cm->rtcd.loopfilter, simple_mb_h)
(y_ptr, post->y_stride, lfi_n->mblim[filter_level]);
@@ -226,12 +226,15 @@ typedef struct VP8Common {
/* Y,U,V,Y2 */
ENTROPY_CONTEXT_PLANES *above_context; /* row of context for each plane */
ENTROPY_CONTEXT_PLANES left_context; /* (up to) 4 contexts "" */
ENTROPY_CONTEXT_PLANES left_context[2]; /* (up to) 4 contexts "" */
/* keyframe block modes are predicted by their above, left neighbors */
vp8_prob kf_bmode_prob [VP8_BINTRAMODES] [VP8_BINTRAMODES] [VP8_BINTRAMODES - 1];
vp8_prob kf_ymode_prob[8][VP8_YMODES - 1]; /* keyframe "" */
#if CONFIG_SUPERBLOCKS
vp8_prob sb_kf_ymode_prob[8][VP8_I32X32_MODES - 1];
#endif
int kf_ymode_probs_index;
int kf_ymode_probs_update;
vp8_prob kf_uv_mode_prob[VP8_YMODES] [VP8_UV_MODES - 1];
@@ -239,6 +242,9 @@ typedef struct VP8Common {
vp8_prob prob_intra_coded;
vp8_prob prob_last_coded;
vp8_prob prob_gf_coded;
#if CONFIG_SUPERBLOCKS
vp8_prob sb_coded;
#endif
// Context probabilities when using predictive coding of segment id
vp8_prob segment_pred_probs[PREDICTION_PROBS];
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
@@ -224,10 +225,24 @@ void set_pred_flag(MACROBLOCKD *const xd,
switch (pred_id) {
case PRED_SEG_ID:
xd->mode_info_context->mbmi.seg_id_predicted = pred_flag;
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
xd->mode_info_context[1].mbmi.seg_id_predicted = pred_flag;
xd->mode_info_context[xd->mode_info_stride].mbmi.seg_id_predicted = pred_flag;
xd->mode_info_context[xd->mode_info_stride+1].mbmi.seg_id_predicted = pred_flag;
}
#endif
break;
case PRED_REF:
xd->mode_info_context->mbmi.ref_predicted = pred_flag;
#if CONFIG_SUPERBLOCKS
if (xd->mode_info_context->mbmi.encoded_as_sb) {
xd->mode_info_context[1].mbmi.ref_predicted = pred_flag;
xd->mode_info_context[xd->mode_info_stride].mbmi.ref_predicted = pred_flag;
xd->mode_info_context[xd->mode_info_stride+1].mbmi.ref_predicted = pred_flag;
}
#endif
break;
case PRED_MBSKIP:
@@ -124,6 +124,52 @@ void vp8_recon2b_c
}
}
#if CONFIG_SUPERBLOCKS
void vp8_recon_mby_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *dst) {
int x, y;
BLOCKD *b = &xd->block[0];
int stride = b->dst_stride;
short *diff = b->diff;
for (y = 0; y < 16; y++) {
for (x = 0; x < 16; x++) {
int a = dst[x] + diff[x];
if (a < 0)
a = 0;
else if (a > 255)
a = 255;
dst[x] = a;
}
dst += stride;
diff += 16;
}
}
void vp8_recon_mbuv_s_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
int x, y, i;
uint8_t *dst = udst;
for (i = 0; i < 2; i++, dst = vdst) {
BLOCKD *b = &xd->block[16 + 4 * i];
int stride = b->dst_stride;
short *diff = b->diff;
for (y = 0; y < 8; y++) {
for (x = 0; x < 8; x++) {
int a = dst[x] + diff[x];
if (a < 0)
a = 0;
else if (a > 255)
a = 255;
dst[x] = a;
}
dst += stride;
diff += 8;
}
}
}
#endif
void vp8_recon_mby_c(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *xd) {
#if ARCH_ARM
BLOCKD *b = &xd->block[0];
@@ -100,6 +100,11 @@ extern prototype_recon_macroblock(vp8_recon_recon_mb);
#endif
extern prototype_recon_macroblock(vp8_recon_recon_mby);
#ifndef vp8_recon_build_intra_predictors_sby_s
#define vp8_recon_build_intra_predictors_sby_s vp8_build_intra_predictors_sby_s
#endif
extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sby_s);
#ifndef vp8_recon_build_intra_predictors_mby
#define vp8_recon_build_intra_predictors_mby vp8_build_intra_predictors_mby
#endif
@@ -126,6 +131,11 @@ extern prototype_build_intra_predictors\
extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mby_s);
#ifndef vp8_recon_build_intra_predictors_sbuv_s
#define vp8_recon_build_intra_predictors_sbuv_s vp8_build_intra_predictors_sbuv_s
#endif
extern prototype_build_intra_predictors(vp8_recon_build_intra_predictors_sbuv_s);
#ifndef vp8_recon_build_intra_predictors_mbuv
#define vp8_recon_build_intra_predictors_mbuv vp8_build_intra_predictors_mbuv
#endif
@@ -214,10 +224,16 @@ typedef struct vp8_recon_rtcd_vtable {
vp8_recon_fn_t recon4;
vp8_recon_mb_fn_t recon_mb;
vp8_recon_mb_fn_t recon_mby;
#if CONFIG_SUPERBLOCKS
vp8_build_intra_pred_fn_t build_intra_predictors_sby_s;
#endif
vp8_build_intra_pred_fn_t build_intra_predictors_mby_s;
vp8_build_intra_pred_fn_t build_intra_predictors_mby;
#if CONFIG_COMP_INTRA_PRED
vp8_build_intra_pred_fn_t build_comp_intra_predictors_mby;
#endif
#if CONFIG_SUPERBLOCKS
vp8_build_intra_pred_fn_t build_intra_predictors_sbuv_s;
#endif
vp8_build_intra_pred_fn_t build_intra_predictors_mbuv_s;
vp8_build_intra_pred_fn_t build_intra_predictors_mbuv;
@@ -759,6 +759,56 @@ void vp8_build_1st_inter16x16_predictors_mb(MACROBLOCKD *xd,
vp8_build_1st_inter16x16_predictors_mbuv(xd, dst_u, dst_v, dst_uvstride);
}
#if CONFIG_SUPERBLOCKS
void vp8_build_inter32x32_predictors_sb(MACROBLOCKD *x,
unsigned char *dst_y,
unsigned char *dst_u,
unsigned char *dst_v,
int dst_ystride,
int dst_uvstride) {
uint8_t *y1 = x->pre.y_buffer, *u1 = x->pre.u_buffer, *v1 = x->pre.v_buffer;
uint8_t *y2 = x->second_pre.y_buffer, *u2 = x->second_pre.u_buffer,
*v2 = x->second_pre.v_buffer;
int n;
for (n = 0; n < 4; n++)
{
const int x_idx = n & 1, y_idx = n >> 1;
x->pre.y_buffer = y1 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
x->pre.u_buffer = u1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
x->pre.v_buffer = v1 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
vp8_build_1st_inter16x16_predictors_mb(x,
dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_ystride, dst_uvstride);
if (x->mode_info_context->mbmi.second_ref_frame) {
x->second_pre.y_buffer = y2 + y_idx * 16 * x->pre.y_stride + x_idx * 16;
x->second_pre.u_buffer = u2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
x->second_pre.v_buffer = v2 + y_idx * 8 * x->pre.uv_stride + x_idx * 8;
vp8_build_2nd_inter16x16_predictors_mb(x,
dst_y + y_idx * 16 * dst_ystride + x_idx * 16,
dst_u + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_v + y_idx * 8 * dst_uvstride + x_idx * 8,
dst_ystride, dst_uvstride);
}
}
x->pre.y_buffer = y1;
x->pre.u_buffer = u1;
x->pre.v_buffer = v1;
if (x->mode_info_context->mbmi.second_ref_frame) {
x->second_pre.y_buffer = y2;
x->second_pre.u_buffer = u2;
x->second_pre.v_buffer = v2;
}
}
#endif
/*
* The following functions should be called after an initial
* call to vp8_build_inter16x16_predictors_mb() or _mby()/_mbuv().
@@ -207,17 +207,18 @@ void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd,
}
}
void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
unsigned char *ypred_ptr,
int y_stride, int mode) {
void vp8_build_intra_predictors_internal(MACROBLOCKD *xd,
unsigned char *src, int src_stride,
unsigned char *ypred_ptr,
int y_stride, int mode, int bsize) {
unsigned char *yabove_row = xd->dst.y_buffer - xd->dst.y_stride;
unsigned char yleft_col[16];
unsigned char *yabove_row = src - src_stride;
unsigned char yleft_col[32];
unsigned char ytop_left = yabove_row[-1];
int r, c, i;
for (i = 0; i < 16; i++) {
yleft_col[i] = xd->dst.y_buffer [i * xd->dst.y_stride - 1];
for (i = 0; i < bsize; i++) {
yleft_col[i] = xd->dst.y_buffer [i * src_stride - 1];
}
/* for Y */
@@ -227,58 +228,58 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
int i;
int shift;
int average = 0;
int log2_bsize_minus_1;
assert(bsize == 8 || bsize == 16 || bsize == 32);
if (bsize == 8) {
log2_bsize_minus_1 = 2;
} else if (bsize == 16) {
log2_bsize_minus_1 = 3;
} else /* bsize == 32 */ {
log2_bsize_minus_1 = 4;
}
if (xd->up_available || xd->left_available) {
if (xd->up_available) {
for (i = 0; i < 16; i++) {
for (i = 0; i < bsize; i++) {
average += yabove_row[i];
}
}
if (xd->left_available) {
for (i = 0; i < 16; i++) {
for (i = 0; i < bsize; i++) {
average += yleft_col[i];
}
}
shift = 3 + xd->up_available + xd->left_available;
shift = log2_bsize_minus_1 + xd->up_available + xd->left_available;
expected_dc = (average + (1 << (shift - 1))) >> shift;
} else {
expected_dc = 128;
}
for (r = 0; r < 16; r++) {
vpx_memset(ypred_ptr, expected_dc, 16);
ypred_ptr += y_stride; /*16;*/
for (r = 0; r < bsize; r++) {
vpx_memset(ypred_ptr, expected_dc, bsize);
ypred_ptr += y_stride;
}
}
break;
case V_PRED: {
for (r = 0; r < 16; r++) {
((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
for (r = 0; r < bsize; r++) {
memcpy(ypred_ptr, yabove_row, bsize);
ypred_ptr += y_stride;
}
}
break;
case H_PRED: {
for (r = 0; r < 16; r++) {
vpx_memset(ypred_ptr, yleft_col[r], 16);
for (r = 0; r < bsize; r++) {
vpx_memset(ypred_ptr, yleft_col[r], bsize);
ypred_ptr += y_stride;
}
}
break;
case TM_PRED: {
for (r = 0; r < 16; r++) {
for (c = 0; c < 16; c++) {
for (r = 0; r < bsize; r++) {
for (c = 0; c < bsize; c++) {
int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
if (pred < 0)
@@ -292,31 +293,30 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
ypred_ptr += y_stride;
}
}
break;
case D45_PRED: {
d45_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
d45_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D135_PRED: {
d135_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
d135_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D117_PRED: {
d117_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
d117_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D153_PRED: {
d153_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
d153_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D27_PRED: {
d27_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
d27_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case D63_PRED: {
d63_predictor(ypred_ptr, y_stride, 16, yabove_row, yleft_col);
d63_predictor(ypred_ptr, y_stride, bsize, yabove_row, yleft_col);
}
break;
case I8X8_PRED:
@@ -332,25 +332,36 @@ void vp8_build_intra_predictors_mby_internal(MACROBLOCKD *xd,
}
void vp8_build_intra_predictors_mby(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mby_internal(xd, xd->predictor, 16,
xd->mode_info_context->mbmi.mode);
vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
xd->predictor, 16,
xd->mode_info_context->mbmi.mode, 16);
}
void vp8_build_intra_predictors_mby_s(MACROBLOCKD *xd) {
vp8_build_intra_predictors_mby_internal(xd, xd->dst.y_buffer,
xd->dst.y_stride,
xd->mode_info_context->mbmi.mode);
vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
xd->dst.y_buffer, xd->dst.y_stride,
xd->mode_info_context->mbmi.mode, 16);
}
#if CONFIG_SUPERBLOCKS
void vp8_build_intra_predictors_sby_s(MACROBLOCKD *x) {
vp8_build_intra_predictors_internal(x, x->dst.y_buffer, x->dst.y_stride,
x->dst.y_buffer, x->dst.y_stride,
x->mode_info_context->mbmi.mode, 32);
}
#endif
#if CONFIG_COMP_INTRA_PRED
void vp8_build_comp_intra_predictors_mby(MACROBLOCKD *xd) {
unsigned char predictor[2][256];
int i;
vp8_build_intra_predictors_mby_internal(
xd, predictor[0], 16, xd->mode_info_context->mbmi.mode);
vp8_build_intra_predictors_mby_internal(
xd, predictor[1], 16, xd->mode_info_context->mbmi.second_mode);
vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
predictor[0], 16,
xd->mode_info_context->mbmi.mode);
vp8_build_intra_predictors_internal(xd, xd->dst.y_buffer, xd->dst.y_stride,
predictor[1], 16,
xd->mode_info_context->mbmi.second_mode);
for (i = 0; i < 256; i++) {
xd->predictor[i] = (predictor[0][i] + predictor[1][i] + 1) >> 1;
@@ -362,172 +373,37 @@ void vp8_build_intra_predictors_mbuv_internal(MACROBLOCKD *xd,
unsigned char *upred_ptr,
unsigned char *vpred_ptr,
int uv_stride,
int mode) {
YV12_BUFFER_CONFIG * dst = &xd->dst;
unsigned char *uabove_row = dst->u_buffer - dst->uv_stride;
unsigned char uleft_col[16];
unsigned char utop_left = uabove_row[-1];
unsigned char *vabove_row = dst->v_buffer - dst->uv_stride;
unsigned char vleft_col[20];
unsigned char vtop_left = vabove_row[-1];
int i, j;
for (i = 0; i < 8; i++) {
uleft_col[i] = dst->u_buffer [i * dst->uv_stride - 1];
vleft_col[i] = dst->v_buffer [i * dst->uv_stride - 1];
}
switch (mode) {
case DC_PRED: {
int expected_udc;
int expected_vdc;
int i;
int shift;
int Uaverage = 0;
int Vaverage = 0;
if (xd->up_available) {
for (i = 0; i < 8; i++) {