Commit 8b17f7f4 authored by Scott LaVarnway

Revert "Remove mi_grid_* structures."

(see I3a05cf1610679fed26e0b2eadd315a9ae91afdd6)

For the test clip used, the decoder performance improved by ~2%.
This is also an intermediate step towards adding back the
mode_info streams.

Change-Id: Idddc4a3f46e4180fbebddc156c4bbf177d5c2e0d
parent 924d06a0
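
For context before the diff, here is a hedged sketch of the two layouts this revert switches between. The types below are minimal stand-ins, not the real vp9_blockd.h definitions; only the src_mi vs. pointer-grid difference mirrors the changes that follow.

/* Minimal stand-in types -- illustrative only. */
typedef struct { int sb_type; } MB_MODE_INFO_SK;

/* Layout being reverted away from (I3a05cf16...): one flat MODE_INFO array,
 * where each entry carries a src_mi pointer aliasing its block's top-left
 * entry, so every access pays an extra hop: xd->mi[0].src_mi->mbmi. */
typedef struct MODE_INFO_OLD {
  struct MODE_INFO_OLD *src_mi;
  MB_MODE_INFO_SK mbmi;
} MODE_INFO_OLD;

/* Layout restored here: a separate grid of MODE_INFO pointers (mi_grid_*).
 * Each 8x8 grid cell points straight at its block's MODE_INFO, so the access
 * becomes xd->mi[0]->mbmi with no src_mi indirection. */
typedef struct MODE_INFO_NEW {
  MB_MODE_INFO_SK mbmi;
} MODE_INFO_NEW;

static MB_MODE_INFO_SK *mbmi_before(MODE_INFO_OLD *mi) { return &mi[0].src_mi->mbmi; }
static MB_MODE_INFO_SK *mbmi_after(MODE_INFO_NEW **mi) { return &mi[0]->mbmi; }

The pointer grid trades a little extra memory for one fewer dependent load per mode-info access, which is consistent with the ~2% decoder gain quoted above.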
@@ -40,7 +40,7 @@ void vp9_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
-  const MB_MODE_INFO* mbmi = &xd->mi[0].src_mi->mbmi;
+  const MB_MODE_INFO* mbmi = &xd->mi[0]->mbmi;
   // block and transform sizes, in number of 4x4 blocks log 2 ("*_b")
   // 4x4=0, 8x8=2, 16x16=4, 32x32=6, 64x64=8
   // transform size varies per plane, look it up in a common way.
...
@@ -130,7 +130,6 @@ typedef struct {
 } MB_MODE_INFO;
 typedef struct MODE_INFO {
-  struct MODE_INFO *src_mi;
   MB_MODE_INFO mbmi;
   b_mode_info bmi[4];
 } MODE_INFO;
@@ -191,7 +190,7 @@ typedef struct macroblockd {
   int mi_stride;
-  MODE_INFO *mi;
+  MODE_INFO **mi;
   MODE_INFO *left_mi;
   MODE_INFO *above_mi;
   MB_MODE_INFO *left_mbmi;
@@ -245,7 +244,7 @@ extern const TX_TYPE intra_mode_to_tx_type_lookup[INTRA_MODES];
 static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type,
                                   const MACROBLOCKD *xd) {
-  const MB_MODE_INFO *const mbmi = &xd->mi[0].src_mi->mbmi;
+  const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(mbmi))
     return DCT_DCT;
@@ -255,7 +254,7 @@ static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type,
 static INLINE TX_TYPE get_tx_type_4x4(PLANE_TYPE plane_type,
                                       const MACROBLOCKD *xd, int ib) {
-  const MODE_INFO *const mi = xd->mi[0].src_mi;
+  const MODE_INFO *const mi = xd->mi[0];
   if (plane_type != PLANE_TYPE_Y || xd->lossless || is_inter_block(&mi->mbmi))
     return DCT_DCT;
...
@@ -35,7 +35,7 @@ static void print_mi_data(VP9_COMMON *cm, FILE *file, const char *descriptor,
     fprintf(file, "%c ", prefix);
     for (mi_col = 0; mi_col < cols; mi_col++) {
       fprintf(file, "%2d ",
-              *((int*) ((char *) (&mi->src_mi->mbmi) +
+              *((int*) ((char *) (&mi->mbmi) +
                         member_offset)));
       mi++;
     }
@@ -64,7 +64,7 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
   for (mi_row = 0; mi_row < rows; mi_row++) {
     fprintf(mvs, "S ");
     for (mi_col = 0; mi_col < cols; mi_col++) {
-      fprintf(mvs, "%2d ", mi->src_mi->mbmi.skip);
+      fprintf(mvs, "%2d ", mi->mbmi.skip);
       mi++;
     }
     fprintf(mvs, "\n");
@@ -78,8 +78,8 @@ void vp9_print_modes_and_motion_vectors(VP9_COMMON *cm, const char *file) {
   for (mi_row = 0; mi_row < rows; mi_row++) {
     fprintf(mvs, "V ");
     for (mi_col = 0; mi_col < cols; mi_col++) {
-      fprintf(mvs, "%4d:%4d ", mi->src_mi->mbmi.mv[0].as_mv.row,
-               mi->src_mi->mbmi.mv[0].as_mv.col);
+      fprintf(mvs, "%4d:%4d ", mi->mbmi.mv[0].as_mv.row,
+               mi->mbmi.mv[0].as_mv.col);
       mi++;
     }
     fprintf(mvs, "\n");
...
@@ -216,7 +216,7 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
 static INLINE const scan_order *get_scan(const MACROBLOCKD *xd, TX_SIZE tx_size,
                                          PLANE_TYPE type, int block_idx) {
-  const MODE_INFO *const mi = xd->mi[0].src_mi;
+  const MODE_INFO *const mi = xd->mi[0];
   if (is_inter_block(&mi->mbmi) || type != PLANE_TYPE_Y || xd->lossless) {
     return &vp9_default_scan_orders[tx_size];
...
@@ -826,12 +826,12 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
 // by mi_row, mi_col.
 // TODO(JBB): This function only works for yv12.
 void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
-                    MODE_INFO *mi, const int mode_info_stride,
+                    MODE_INFO **mi, const int mode_info_stride,
                     LOOP_FILTER_MASK *lfm) {
   int idx_32, idx_16, idx_8;
   const loop_filter_info_n *const lfi_n = &cm->lf_info;
-  MODE_INFO *mip = mi;
-  MODE_INFO *mip2 = mi;
+  MODE_INFO **mip = mi;
+  MODE_INFO **mip2 = mi;
   // These are offsets to the next mi in the 64x64 block. It is what gets
   // added to the mi ptr as we go through each loop. It helps us to avoid
@@ -859,28 +859,28 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
                         cm->mi_cols - mi_col : MI_BLOCK_SIZE);
   vp9_zero(*lfm);
-  assert(mip != NULL);
+  assert(mip[0] != NULL);
   // TODO(jimbankoski): Try moving most of the following code into decode
   // loop and storing lfm in the mbmi structure so that we don't have to go
   // through the recursive loop structure multiple times.
-  switch (mip->mbmi.sb_type) {
+  switch (mip[0]->mbmi.sb_type) {
     case BLOCK_64X64:
-      build_masks(lfi_n, mip , 0, 0, lfm);
+      build_masks(lfi_n, mip[0] , 0, 0, lfm);
       break;
     case BLOCK_64X32:
-      build_masks(lfi_n, mip, 0, 0, lfm);
+      build_masks(lfi_n, mip[0], 0, 0, lfm);
       mip2 = mip + mode_info_stride * 4;
       if (4 >= max_rows)
        break;
-      build_masks(lfi_n, mip2, 32, 8, lfm);
+      build_masks(lfi_n, mip2[0], 32, 8, lfm);
       break;
     case BLOCK_32X64:
-      build_masks(lfi_n, mip, 0, 0, lfm);
+      build_masks(lfi_n, mip[0], 0, 0, lfm);
       mip2 = mip + 4;
       if (4 >= max_cols)
        break;
-      build_masks(lfi_n, mip2, 4, 2, lfm);
+      build_masks(lfi_n, mip2[0], 4, 2, lfm);
       break;
     default:
       for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
@@ -890,23 +890,23 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
         const int mi_32_row_offset = ((idx_32 >> 1) << 2);
         if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
           continue;
-        switch (mip->mbmi.sb_type) {
+        switch (mip[0]->mbmi.sb_type) {
           case BLOCK_32X32:
-            build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
            break;
          case BLOCK_32X16:
-            build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
            if (mi_32_row_offset + 2 >= max_rows)
              continue;
            mip2 = mip + mode_info_stride * 2;
-            build_masks(lfi_n, mip2, shift_y + 16, shift_uv + 4, lfm);
+            build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
            break;
          case BLOCK_16X32:
-            build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
            if (mi_32_col_offset + 2 >= max_cols)
              continue;
            mip2 = mip + 2;
-            build_masks(lfi_n, mip2, shift_y + 2, shift_uv + 1, lfm);
+            build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
            break;
          default:
            for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
@@ -920,29 +920,29 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
              if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows)
                continue;
-              switch (mip->mbmi.sb_type) {
+              switch (mip[0]->mbmi.sb_type) {
                case BLOCK_16X16:
-                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  break;
                case BLOCK_16X8:
-                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  if (mi_16_row_offset + 1 >= max_rows)
                    continue;
                  mip2 = mip + mode_info_stride;
-                  build_y_mask(lfi_n, mip2, shift_y+8, lfm);
+                  build_y_mask(lfi_n, mip2[0], shift_y+8, lfm);
                  break;
                case BLOCK_8X16:
-                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  if (mi_16_col_offset +1 >= max_cols)
                    continue;
                  mip2 = mip + 1;
-                  build_y_mask(lfi_n, mip2, shift_y+1, lfm);
+                  build_y_mask(lfi_n, mip2[0], shift_y+1, lfm);
                  break;
                default: {
                  const int shift_y = shift_32_y[idx_32] +
                                      shift_16_y[idx_16] +
                                      shift_8_y[0];
-                  build_masks(lfi_n, mip, shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
                  mip += offset[0];
                  for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
                    const int shift_y = shift_32_y[idx_32] +
@@ -956,7 +956,7 @@ void vp9_setup_mask(VP9_COMMON *const cm, const int mi_row, const int mi_col,
                    if (mi_8_col_offset >= max_cols ||
                        mi_8_row_offset >= max_rows)
                      continue;
-                    build_y_mask(lfi_n, mip, shift_y, lfm);
+                    build_y_mask(lfi_n, mip[0], shift_y, lfm);
                  }
                  break;
                }
@@ -1151,7 +1151,7 @@ static void highbd_filter_selectively_vert(uint16_t *s, int pitch,
 void vp9_filter_block_plane_non420(VP9_COMMON *cm,
                                    struct macroblockd_plane *plane,
-                                   MODE_INFO *mi_8x8,
+                                   MODE_INFO **mi_8x8,
                                    int mi_row, int mi_col) {
   const int ss_x = plane->subsampling_x;
   const int ss_y = plane->subsampling_y;
@@ -1175,7 +1175,7 @@ void vp9_filter_block_plane_non420(VP9_COMMON *cm,
   // Determine the vertical edges that need filtering
   for (c = 0; c < MI_BLOCK_SIZE && mi_col + c < cm->mi_cols; c += col_step) {
-    const MODE_INFO *mi = mi_8x8[c].src_mi;
+    const MODE_INFO *mi = mi_8x8[c];
     const BLOCK_SIZE sb_type = mi[0].mbmi.sb_type;
     const int skip_this = mi[0].mbmi.skip && is_inter_block(&mi[0].mbmi);
     // left edge of current unit is block/partition edge -> no skip
@@ -1545,7 +1545,7 @@ void vp9_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
     path = LF_PATH_SLOW;
   for (mi_row = start; mi_row < stop; mi_row += MI_BLOCK_SIZE) {
-    MODE_INFO *mi = cm->mi + mi_row * cm->mi_stride;
+    MODE_INFO **mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
       int plane;
...
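
All of the loop-filter changes above follow one pattern: the code now walks a grid of MODE_INFO pointers rather than a MODE_INFO array, stepping a cursor by the grid stride and dereferencing one cell (mip[0]) to reach block data. A hedged, self-contained sketch of that traversal with stand-in types (the real code is vp9_setup_mask / vp9_loop_filter_rows above):

/* Stand-in type -- illustrative only. */
typedef struct { int sb_type; } MODE_INFO_SK;

/* Walk the visible pointer grid one 64x64 superblock (mi_block_size 8x8
 * units) at a time.  'grid' is row-major with stride 'mi_stride'; each cell
 * points at the MODE_INFO shared by every 8x8 unit of its block. */
static void walk_superblocks(MODE_INFO_SK **grid, int mi_stride,
                             int mi_rows, int mi_cols, int mi_block_size) {
  int mi_row, mi_col;
  for (mi_row = 0; mi_row < mi_rows; mi_row += mi_block_size) {
    MODE_INFO_SK **row = grid + mi_row * mi_stride;   /* row cursor */
    for (mi_col = 0; mi_col < mi_cols; mi_col += mi_block_size) {
      MODE_INFO_SK **mip = row + mi_col;  /* top-left cell of this superblock */
      (void)mip[0]->sb_type;              /* one dereference to block data */
    }
  }
}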
@@ -95,7 +95,7 @@ struct VP9LfSyncData;
 // by mi_row, mi_col.
 void vp9_setup_mask(struct VP9Common *const cm,
                     const int mi_row, const int mi_col,
-                    MODE_INFO *mi_8x8, const int mode_info_stride,
+                    MODE_INFO **mi_8x8, const int mode_info_stride,
                     LOOP_FILTER_MASK *lfm);
 void vp9_filter_block_plane_ss00(struct VP9Common *const cm,
@@ -110,7 +110,7 @@ void vp9_filter_block_plane_ss11(struct VP9Common *const cm,
 void vp9_filter_block_plane_non420(struct VP9Common *cm,
                                    struct macroblockd_plane *plane,
-                                   MODE_INFO *mi_8x8,
+                                   MODE_INFO **mi_8x8,
                                    int mi_row, int mi_col);
 void vp9_loop_filter_init(struct VP9Common *cm);
...
@@ -37,7 +37,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
     const POSITION *const mv_ref = &mv_ref_search[i];
     if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
       const MODE_INFO *const candidate_mi = xd->mi[mv_ref->col + mv_ref->row *
-                                                   xd->mi_stride].src_mi;
+                                                   xd->mi_stride];
       const MB_MODE_INFO *const candidate = &candidate_mi->mbmi;
       // Keep counts for entropy encoding.
       context_counter += mode_2_counter[candidate->mode];
@@ -59,7 +59,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
     const POSITION *const mv_ref = &mv_ref_search[i];
     if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
       const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row *
-                                                    xd->mi_stride].src_mi->mbmi;
+                                                    xd->mi_stride]->mbmi;
       different_ref_found = 1;
       if (candidate->ref_frame[0] == ref_frame)
@@ -101,7 +101,7 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
       const POSITION *mv_ref = &mv_ref_search[i];
       if (is_inside(tile, mi_col, mi_row, cm->mi_rows, mv_ref)) {
         const MB_MODE_INFO *const candidate = &xd->mi[mv_ref->col + mv_ref->row
-                                              * xd->mi_stride].src_mi->mbmi;
+                                              * xd->mi_stride]->mbmi;
         // If the candidate is INTRA we don't want to consider its mv.
         IF_DIFF_REF_FRAME_ADD_MV(candidate, ref_frame, ref_sign_bias,
@@ -183,7 +183,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
                                    int block, int ref, int mi_row, int mi_col,
                                    int_mv *nearest_mv, int_mv *near_mv) {
   int_mv mv_list[MAX_MV_REF_CANDIDATES];
-  MODE_INFO *const mi = xd->mi[0].src_mi;
+  MODE_INFO *const mi = xd->mi[0];
   b_mode_info *bmi = mi->bmi;
   int n;
...
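
The mv-ref changes above are the same substitution applied to a neighbour lookup: a candidate at a (row, col) offset from the current block is one index computation into the pointer grid, then one dereference. A hedged sketch with stand-in types (names follow the diff; POSITION_SK is a hypothetical stand-in):

/* Stand-in types -- illustrative only. */
typedef struct { int mode; } MB_MODE_INFO_SK;
typedef struct { MB_MODE_INFO_SK mbmi; } MODE_INFO_SK;
typedef struct { int row, col; } POSITION_SK;

/* xd->mi points at the current block's cell in the pointer grid, so a
 * neighbour at offset (row, col) is grid cell [col + row * mi_stride].
 * Before the revert this needed a further ".src_mi" hop; now it is direct. */
static const MB_MODE_INFO_SK *candidate_mbmi(MODE_INFO_SK **mi, int mi_stride,
                                             const POSITION_SK *mv_ref) {
  return &mi[mv_ref->col + mv_ref->row * mi_stride]->mbmi;
}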
@@ -201,6 +201,12 @@ typedef struct VP9Common {
   void (*free_mi)(struct VP9Common *cm);
   void (*setup_mi)(struct VP9Common *cm);
+  // Grid of pointers to 8x8 MODE_INFO structs. Any 8x8 not in the visible
+  // area will be NULL.
+  MODE_INFO **mi_grid_base;
+  MODE_INFO **mi_grid_visible;
+  MODE_INFO **prev_mi_grid_base;
+  MODE_INFO **prev_mi_grid_visible;
   // Whether to use previous frame's motion vectors for prediction.
   int use_prev_frame_mvs;
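
The four new VP9Common fields come in base/visible pairs. In the code this revert restores, the *_visible pointers are, as far as I can tell (treat this as an assumption rather than a quote from vp9_alloccommon.c), the *_base allocations offset past a one-row, one-column border, mirroring how cm->mi relates to cm->mip, so that mi_row * mi_stride + mi_col indexes visible 8x8 units directly:

/* Stand-in types -- illustrative only; the real fields live in VP9Common. */
typedef struct { int sb_type; } MODE_INFO_SK;
typedef struct {
  int mi_stride;
  MODE_INFO_SK **mi_grid_base, **mi_grid_visible;
  MODE_INFO_SK **prev_mi_grid_base, **prev_mi_grid_visible;
} VP9_COMMON_SK;

/* Assumed relationship: skip the border row and column of the padded grid. */
static void setup_mi_grids(VP9_COMMON_SK *cm) {
  cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
  cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
}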
@@ -371,7 +377,7 @@ static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
   xd->up_available = (mi_row != 0);
   xd->left_available = (mi_col > tile->mi_col_start);
   if (xd->up_available) {
-    xd->above_mi = xd->mi[-xd->mi_stride].src_mi;
+    xd->above_mi = xd->mi[-xd->mi_stride];
     xd->above_mbmi = xd->above_mi ? &xd->above_mi->mbmi : NULL;
   } else {
     xd->above_mi = NULL;
@@ -379,7 +385,7 @@ static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
   }
   if (xd->left_available) {
-    xd->left_mi = xd->mi[-1].src_mi;
+    xd->left_mi = xd->mi[-1];
     xd->left_mbmi = xd->left_mi ? &xd->left_mi->mbmi : NULL;
   } else {
     xd->left_mi = NULL;
...
@@ -344,7 +344,7 @@ int vp9_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
 // left of the entries corresponding to real blocks.
 // The prediction flags in these dummy entries are initialized to 0.
 int vp9_get_tx_size_context(const MACROBLOCKD *xd) {
-  const int max_tx_size = max_txsize_lookup[xd->mi[0].src_mi->mbmi.sb_type];
+  const int max_tx_size = max_txsize_lookup[xd->mi[0]->mbmi.sb_type];
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
   const int has_above = xd->up_available;
...
@@ -172,7 +172,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block,
                             int x, int y, int w, int h,
                             int mi_x, int mi_y) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
-  const MODE_INFO *mi = xd->mi[0].src_mi;
+  const MODE_INFO *mi = xd->mi[0];
   const int is_compound = has_second_ref(&mi->mbmi);
   const InterpKernel *kernel = vp9_get_interp_kernel(mi->mbmi.interp_filter);
   int ref;
@@ -246,7 +246,7 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
   const int bw = 4 * num_4x4_w;
   const int bh = 4 * num_4x4_h;
-  if (xd->mi[0].src_mi->mbmi.sb_type < BLOCK_8X8) {
+  if (xd->mi[0]->mbmi.sb_type < BLOCK_8X8) {
     int i = 0, x, y;
     assert(bsize == BLOCK_8X8);
     for (y = 0; y < num_4x4_h; ++y)
...
@@ -107,7 +107,7 @@ void thread_loop_filter_rows(const YV12_BUFFER_CONFIG *const frame_buffer,
   for (mi_row = start; mi_row < stop;
        mi_row += lf_sync->num_workers * MI_BLOCK_SIZE) {
-    MODE_INFO *const mi = cm->mi + mi_row * cm->mi_stride;
+    MODE_INFO **const mi = cm->mi_grid_visible + mi_row * cm->mi_stride;
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
       const int r = mi_row >> MI_BLOCK_SIZE_LOG2;
...
@@ -304,7 +304,7 @@ static void predict_and_reconstruct_intra_block(int plane, int block,
   VP9_COMMON *const cm = args->cm;
   MACROBLOCKD *const xd = args->xd;
   struct macroblockd_plane *const pd = &xd->plane[plane];
-  MODE_INFO *const mi = xd->mi[0].src_mi;
+  MODE_INFO *const mi = xd->mi[0];
   const PREDICTION_MODE mode = (plane == 0) ? get_y_mode(mi, block)
                                             : mi->mbmi.uv_mode;
   const int16_t *const dequant = (plane == 0) ? args->y_dequant
@@ -367,13 +367,12 @@ static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
   const int offset = mi_row * cm->mi_stride + mi_col;
   int x, y;
-  xd->mi = cm->mi + offset;
-  xd->mi[0].src_mi = &xd->mi[0];  // Point to self.
-  xd->mi[0].mbmi.sb_type = bsize;
+  xd->mi = cm->mi_grid_visible + offset;
+  xd->mi[0] = &cm->mi[offset];
+  xd->mi[0]->mbmi.sb_type = bsize;
   for (y = 0; y < y_mis; ++y)
     for (x = !y; x < x_mis; ++x) {
-      xd->mi[y * cm->mi_stride + x].src_mi = &xd->mi[0];
+      xd->mi[y * cm->mi_stride + x] = xd->mi[0];
     }
   set_skip_context(xd, mi_row, mi_col);
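
The set_offsets() hunk above is where the decoder wires the grid up: one MODE_INFO in cm->mi describes the whole block, and every grid cell the block covers is pointed at it. A hedged sketch of just that fan-out, with stand-in types (x_mis/y_mis are the block's width and height in 8x8 units, as in the diff):

/* Stand-in type -- illustrative only. */
typedef struct { int sb_type; } MODE_INFO_SK;

/* 'grid' is xd->mi, already offset to the block's top-left cell; 'block_mi'
 * is &cm->mi[offset].  Afterwards, a neighbour scan that lands on any 8x8
 * cell inside the block reads the same MODE_INFO as the top-left cell. */
static void point_block_at_mi(MODE_INFO_SK **grid, MODE_INFO_SK *block_mi,
                              int mi_stride, int x_mis, int y_mis) {
  int x, y;
  grid[0] = block_mi;                     /* top-left cell owns the MODE_INFO */
  for (y = 0; y < y_mis; ++y)
    for (x = !y; x < x_mis; ++x)          /* x starts at 1 on row 0: (0,0) is done */
      grid[y * mi_stride + x] = grid[0];  /* alias every covered cell */
}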
@@ -383,7 +382,7 @@ static MB_MODE_INFO *set_offsets(VP9_COMMON *const cm, MACROBLOCKD *const xd,
   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);