Commit 66f2f65e authored by hui su

Merge MISC_FIXES

Remove MISC_FIXES flags except for the changes on MV precision, which
has a 0.1% performance drop.

On derflr, the impact is -0.012%.

Change-Id: I0a74e5a212dd0cb827192a318c92a714c9681e45
parent 5f9e089b
......@@ -104,9 +104,7 @@ typedef struct {
TX_SIZE inter_tx_size[64];
#endif
int8_t skip;
#if CONFIG_MISC_FIXES
int8_t has_no_coeffs;
#endif
int8_t segment_id;
int8_t seg_id_predicted; // valid only when temporal_update is enabled
......
......@@ -127,21 +127,6 @@ const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
}
};
#if !CONFIG_MISC_FIXES
const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
{ 144, 11, 54, 157, 195, 130, 46, 58, 108 }, // y = dc
{ 118, 15, 123, 148, 131, 101, 44, 93, 131 }, // y = v
{ 113, 12, 23, 188, 226, 142, 26, 32, 125 }, // y = h
{ 120, 11, 50, 123, 163, 135, 64, 77, 103 }, // y = d45
{ 113, 9, 36, 155, 111, 157, 32, 44, 161 }, // y = d135
{ 116, 9, 55, 176, 76, 96, 37, 61, 149 }, // y = d117
{ 115, 9, 28, 141, 161, 167, 21, 25, 193 }, // y = d153
{ 120, 12, 32, 145, 195, 142, 32, 38, 86 }, // y = d207
{ 116, 12, 64, 120, 140, 125, 49, 115, 121 }, // y = d63
{ 102, 19, 66, 162, 182, 122, 35, 59, 128 } // y = tm
};
#endif
static const vpx_prob default_if_y_probs[BLOCK_SIZE_GROUPS][INTRA_MODES - 1] = {
{ 65, 32, 18, 144, 162, 194, 41, 51, 98 }, // block_size < 8x8
{ 132, 68, 18, 165, 217, 196, 45, 40, 78 }, // block_size < 16x16
......@@ -162,32 +147,6 @@ static const vpx_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
{ 101, 21, 107, 181, 192, 103, 19, 67, 125 } // y = tm
};
#if !CONFIG_MISC_FIXES
const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
// 8x8 -> 4x4
{ 158, 97, 94 }, // a/l both not split
{ 93, 24, 99 }, // a split, l not split
{ 85, 119, 44 }, // l split, a not split
{ 62, 59, 67 }, // a/l both split
// 16x16 -> 8x8
{ 149, 53, 53 }, // a/l both not split
{ 94, 20, 48 }, // a split, l not split
{ 83, 53, 24 }, // l split, a not split
{ 52, 18, 18 }, // a/l both split
// 32x32 -> 16x16
{ 150, 40, 39 }, // a/l both not split
{ 78, 12, 26 }, // a split, l not split
{ 67, 33, 11 }, // l split, a not split
{ 24, 7, 5 }, // a/l both split
// 64x64 -> 32x32
{ 174, 35, 49 }, // a/l both not split
{ 68, 11, 27 }, // a split, l not split
{ 57, 15, 9 }, // l split, a not split
{ 12, 3, 3 }, // a/l both split
};
#endif
static const vpx_prob default_partition_probs[PARTITION_CONTEXTS]
[PARTITION_TYPES - 1] = {
// 8x8 -> 4x4
......@@ -1085,13 +1044,12 @@ default_intra_ext_tx_prob[EXT_TX_SETS_INTRA][EXT_TX_SIZES]
}
};
#endif // CONFIG_EXT_TX
#if CONFIG_MISC_FIXES
// FIXME(someone) need real defaults here
static const struct segmentation_probs default_seg_probs = {
{ 128, 128, 128, 128, 128, 128, 128 },
{ 128, 128, 128 },
};
#endif
#if CONFIG_EXT_INTRA
static const vpx_prob default_ext_intra_probs[2] = {230, 230};
......@@ -1116,10 +1074,8 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
#endif // CONFIG_EXT_TX
#if CONFIG_MISC_FIXES
vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
#endif
#if CONFIG_EXT_INTRA
vp10_copy(fc->ext_intra_probs, default_ext_intra_probs);
#endif // CONFIG_EXT_INTRA
......@@ -1168,16 +1124,6 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
counts->y_mode[i], fc->y_mode_prob[i]);
#if !CONFIG_MISC_FIXES
for (i = 0; i < INTRA_MODES; ++i)
vpx_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
counts->uv_mode[i], fc->uv_mode_prob[i]);
for (i = 0; i < PARTITION_CONTEXTS; i++)
vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
counts->partition[i], fc->partition_prob[i]);
#endif
if (cm->interp_filter == SWITCHABLE) {
for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
vpx_tree_merge_probs(vp10_switchable_interp_tree,
......@@ -1252,7 +1198,7 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
}
}
#endif // CONFIG_EXT_TX
#if CONFIG_MISC_FIXES
if (cm->seg.temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++)
fc->seg.pred_probs[i] = mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
......@@ -1272,7 +1218,7 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
for (i = 0; i < PARTITION_CONTEXTS; i++)
vpx_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
counts->partition[i], fc->partition_prob[i]);
#endif
#if CONFIG_EXT_INTRA
for (i = 0; i < PLANE_TYPES; ++i) {
fc->ext_intra_probs[i] = mode_mv_merge_probs(
......
......@@ -77,9 +77,7 @@ typedef struct frame_contexts {
vpx_prob intra_ext_tx_prob[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
[TX_TYPES - 1];
#endif // CONFIG_EXT_TX
#if CONFIG_MISC_FIXES
struct segmentation_probs seg;
#endif
#if CONFIG_EXT_INTRA
vpx_prob ext_intra_probs[PLANE_TYPES];
#endif // CONFIG_EXT_INTRA
......@@ -111,9 +109,7 @@ typedef struct FRAME_COUNTS {
unsigned int intra_ext_tx[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
[TX_TYPES];
#endif // CONFIG_EXT_TX
#if CONFIG_MISC_FIXES
struct seg_counts seg;
#endif
#if CONFIG_EXT_INTRA
unsigned int ext_intra[PLANE_TYPES][2];
#endif // CONFIG_EXT_INTRA
......@@ -121,11 +117,6 @@ typedef struct FRAME_COUNTS {
extern const vpx_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES]
[INTRA_MODES - 1];
#if !CONFIG_MISC_FIXES
extern const vpx_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
extern const vpx_prob vp10_kf_partition_probs[PARTITION_CONTEXTS]
[PARTITION_TYPES - 1];
#endif
extern const vpx_prob
vp10_default_palette_y_mode_prob[PALETTE_BLOCK_SIZES][PALETTE_Y_MODE_CONTEXTS];
extern const vpx_prob
......
......@@ -719,11 +719,7 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
uint64_t *const int_4x4_y = &lfm->int_4x4_y;
uint16_t *const left_uv = &lfm->left_uv[tx_size_uv];
uint16_t *const above_uv = &lfm->above_uv[tx_size_uv];
#if CONFIG_MISC_FIXES
uint16_t *const int_4x4_uv = &lfm->left_int_4x4_uv;
#else
uint16_t *const int_4x4_uv = &lfm->int_4x4_uv;
#endif
int i;
// If filter level is 0 we don't loop filter.
......@@ -758,13 +754,8 @@ static void build_masks(const loop_filter_info_n *const lfi_n,
// If the block has no coefficients and is not intra we skip applying
// the loop filter on block edges.
#if CONFIG_MISC_FIXES
if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
return;
#else
if (mbmi->skip && is_inter_block(mbmi))
return;
#endif
// Here we are adding a mask for the transform size. The transform
// size mask is set to be correct for a 64x64 prediction block size. We
......@@ -821,13 +812,8 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
*above_y |= above_prediction_mask[block_size] << shift_y;
*left_y |= left_prediction_mask[block_size] << shift_y;
#if CONFIG_MISC_FIXES
if ((mbmi->skip || mbmi->has_no_coeffs) && is_inter_block(mbmi))
return;
#else
if (mbmi->skip && is_inter_block(mbmi))
return;
#endif
*above_y |= (size_mask[block_size] &
above_64x64_txform_mask[tx_size_y]) << shift_y;
......@@ -1019,11 +1005,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
lfm->above_uv[i] &= mask_uv;
}
lfm->int_4x4_y &= mask_y;
#if CONFIG_MISC_FIXES
lfm->above_int_4x4_uv = lfm->left_int_4x4_uv & mask_uv;
#else
lfm->int_4x4_uv &= mask_uv;
#endif
// We don't apply a wide loop filter on the last uv block row. If set
// apply the shorter one instead.
......@@ -1057,11 +1039,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
lfm->above_uv[i] &= mask_uv;
}
lfm->int_4x4_y &= mask_y;
#if CONFIG_MISC_FIXES
lfm->left_int_4x4_uv &= mask_uv_int;
#else
lfm->int_4x4_uv &= mask_uv_int;
#endif
// We don't apply a wide loop filter on the last uv column. If set
// apply the shorter one instead.
......@@ -1091,11 +1069,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
assert(!(lfm->left_uv[TX_16X16]&lfm->left_uv[TX_8X8]));
assert(!(lfm->left_uv[TX_16X16] & lfm->left_uv[TX_4X4]));
assert(!(lfm->left_uv[TX_8X8] & lfm->left_uv[TX_4X4]));
#if CONFIG_MISC_FIXES
assert(!(lfm->left_int_4x4_uv & lfm->left_uv[TX_16X16]));
#else
assert(!(lfm->int_4x4_uv & lfm->left_uv[TX_16X16]));
#endif
assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_8X8]));
assert(!(lfm->above_y[TX_16X16] & lfm->above_y[TX_4X4]));
assert(!(lfm->above_y[TX_8X8] & lfm->above_y[TX_4X4]));
......@@ -1103,11 +1077,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_8X8]));
assert(!(lfm->above_uv[TX_16X16] & lfm->above_uv[TX_4X4]));
assert(!(lfm->above_uv[TX_8X8] & lfm->above_uv[TX_4X4]));
#if CONFIG_MISC_FIXES
assert(!(lfm->above_int_4x4_uv & lfm->above_uv[TX_16X16]));
#else
assert(!(lfm->int_4x4_uv & lfm->above_uv[TX_16X16]));
#endif
}
static void filter_selectively_vert(uint8_t *s, int pitch,
......@@ -1510,11 +1480,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
uint16_t mask_16x16 = lfm->left_uv[TX_16X16];
uint16_t mask_8x8 = lfm->left_uv[TX_8X8];
uint16_t mask_4x4 = lfm->left_uv[TX_4X4];
#if CONFIG_MISC_FIXES
uint16_t mask_4x4_int = lfm->left_int_4x4_uv;
#else
uint16_t mask_4x4_int = lfm->int_4x4_uv;
#endif
assert(plane->subsampling_x == 1 && plane->subsampling_y == 1);
......@@ -1566,11 +1532,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
mask_16x16 = lfm->above_uv[TX_16X16];
mask_8x8 = lfm->above_uv[TX_8X8];
mask_4x4 = lfm->above_uv[TX_4X4];
#if CONFIG_MISC_FIXES
mask_4x4_int = lfm->above_int_4x4_uv;
#else
mask_4x4_int = lfm->int_4x4_uv;
#endif
for (r = 0; r < MI_BLOCK_SIZE && mi_row + r < cm->mi_rows; r += 2) {
const int skip_border_4x4_r = mi_row + r == cm->mi_rows - 1;
......
......@@ -80,12 +80,8 @@ typedef struct {
uint64_t int_4x4_y;
uint16_t left_uv[TX_SIZES];
uint16_t above_uv[TX_SIZES];
#if CONFIG_MISC_FIXES
uint16_t left_int_4x4_uv;
uint16_t above_int_4x4_uv;
#else
uint16_t int_4x4_uv;
#endif
uint8_t lfl_y[64];
uint8_t lfl_uv[16];
} LOOP_FILTER_MASK;
......
......@@ -30,11 +30,6 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
const int bw = num_8x8_blocks_wide_lookup[mi->mbmi.sb_type] << 3;
const int bh = num_8x8_blocks_high_lookup[mi->mbmi.sb_type] << 3;
#if !CONFIG_MISC_FIXES
// Blank the reference vector list
memset(mv_ref_list, 0, sizeof(*mv_ref_list) * MAX_MV_REF_CANDIDATES);
#endif
// The nearest 2 blocks are treated differently
// if the size < 8x8 we get the mv from the bmi substructure,
// and we also need to keep a mode count.
......@@ -133,9 +128,6 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
}
if (prev_frame_mvs->ref_frame[1] > INTRA_FRAME &&
#if !CONFIG_MISC_FIXES
prev_frame_mvs->mv[1].as_int != prev_frame_mvs->mv[0].as_int &&
#endif
prev_frame_mvs->ref_frame[1] != ref_frame) {
int_mv mv = prev_frame_mvs->mv[1];
if (ref_sign_bias[prev_frame_mvs->ref_frame[1]] !=
......@@ -148,17 +140,9 @@ static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
}
Done:
mode_context[ref_frame] = counter_to_context[context_counter];
#if CONFIG_MISC_FIXES
for (i = refmv_count; i < MAX_MV_REF_CANDIDATES; ++i)
mv_ref_list[i].as_int = 0;
#else
// Clamp vectors
for (i = 0; i < MAX_MV_REF_CANDIDATES; ++i)
clamp_mv_ref(&mv_ref_list[i].as_mv, bw, bh, xd);
#endif
}
void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
......
......@@ -119,26 +119,13 @@ static const int idx_n_column_to_subblock[4][2] = {
};
// clamp_mv_ref
#if CONFIG_MISC_FIXES
#define MV_BORDER (8 << 3) // Allow 8 pels in 1/8th pel units
#else
#define MV_BORDER (16 << 3) // Allow 16 pels in 1/8th pel units
#endif
static INLINE void clamp_mv_ref(MV *mv, int bw, int bh, const MACROBLOCKD *xd) {
#if CONFIG_MISC_FIXES
clamp_mv(mv, xd->mb_to_left_edge - bw * 8 - MV_BORDER,
xd->mb_to_right_edge + bw * 8 + MV_BORDER,
xd->mb_to_top_edge - bh * 8 - MV_BORDER,
xd->mb_to_bottom_edge + bh * 8 + MV_BORDER);
#else
(void) bw;
(void) bh;
clamp_mv(mv, xd->mb_to_left_edge - MV_BORDER,
xd->mb_to_right_edge + MV_BORDER,
xd->mb_to_top_edge - MV_BORDER,
xd->mb_to_bottom_edge + MV_BORDER);
#endif
}
// This function returns either the appropriate sub block or block's mv
......@@ -164,11 +151,7 @@ static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
return mv;
}
#if CONFIG_MISC_FIXES
#define CLIP_IN_ADD(mv, bw, bh, xd) clamp_mv_ref(mv, bw, bh, xd)
#else
#define CLIP_IN_ADD(mv, bw, bh, xd) do {} while (0)
#endif
// This macro is used to add a motion vector mv_ref list if it isn't
// already in the list. If it's the second motion vector it will also
......@@ -194,8 +177,6 @@ static INLINE int_mv scale_mv(const MB_MODE_INFO *mbmi, int ref,
ADD_MV_REF_LIST(scale_mv((mbmi), 0, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, bw, bh, xd, Done); \
if (has_second_ref(mbmi) && \
(CONFIG_MISC_FIXES || \
(mbmi)->mv[1].as_int != (mbmi)->mv[0].as_int) && \
(mbmi)->ref_frame[1] != ref_frame) \
ADD_MV_REF_LIST(scale_mv((mbmi), 1, ref_frame, ref_sign_bias), \
refmv_count, mv_ref_list, bw, bh, xd, Done); \
......
......@@ -255,9 +255,6 @@ typedef struct VP10Common {
struct loopfilter lf;
struct segmentation seg;
#if !CONFIG_MISC_FIXES
struct segmentation_probs segp;
#endif
int frame_parallel_decode; // frame-based threading.
......
This diff is collapsed.
......@@ -462,7 +462,6 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
}
#endif // CONFIG_EXT_TX
#if CONFIG_MISC_FIXES
for (i = 0; i < PREDICTION_PROBS; i++)
for (j = 0; j < 2; j++)
cm->counts.seg.pred[i][j] += counts->seg.pred[i][j];
......@@ -471,7 +470,6 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
cm->counts.seg.tree_total[i] += counts->seg.tree_total[i];
cm->counts.seg.tree_mispred[i] += counts->seg.tree_mispred[i];
}
#endif
#if CONFIG_EXT_INTRA
for (i = 0; i < PLANE_TYPES; ++i)
......
This diff is collapsed.
......@@ -196,28 +196,18 @@ static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
int mi_offset, int x_mis, int y_mis,
vpx_reader *r) {
struct segmentation *const seg = &cm->seg;
#if CONFIG_MISC_FIXES
FRAME_COUNTS *counts = xd->counts;
struct segmentation_probs *const segp = &cm->fc->seg;
#else
struct segmentation_probs *const segp = &cm->segp;
#endif
int segment_id;
#if !CONFIG_MISC_FIXES
(void) xd;
#endif
if (!seg->enabled)
return 0; // Default for disabled segmentation
assert(seg->update_map && !seg->temporal_update);
segment_id = read_segment_id(r, segp);
#if CONFIG_MISC_FIXES
if (counts)
++counts->seg.tree_total[segment_id];
#endif
set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
return segment_id;
}
......@@ -237,12 +227,8 @@ static void copy_segment_id(const VP10_COMMON *cm,
static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
int mi_row, int mi_col, vpx_reader *r) {
struct segmentation *const seg = &cm->seg;
#if CONFIG_MISC_FIXES
FRAME_COUNTS *counts = xd->counts;
struct segmentation_probs *const segp = &cm->fc->seg;
#else
struct segmentation_probs *const segp = &cm->segp;
#endif
MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
int predicted_segment_id, segment_id;
const int mi_offset = mi_row * cm->mi_cols + mi_col;
......@@ -270,25 +256,19 @@ static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
const int ctx = vp10_get_pred_context_seg_id(xd);
const vpx_prob pred_prob = segp->pred_probs[ctx];
mbmi->seg_id_predicted = vpx_read(r, pred_prob);
#if CONFIG_MISC_FIXES
if (counts)
++counts->seg.pred[ctx][mbmi->seg_id_predicted];
#endif
if (mbmi->seg_id_predicted) {
segment_id = predicted_segment_id;
} else {
segment_id = read_segment_id(r, segp);
#if CONFIG_MISC_FIXES
if (counts)
++counts->seg.tree_mispred[segment_id];
#endif
}
} else {
segment_id = read_segment_id(r, segp);
#if CONFIG_MISC_FIXES
if (counts)
++counts->seg.tree_total[segment_id];
#endif
}
set_segment_id(cm, mi_offset, x_mis, y_mis, segment_id);
return segment_id;
......
......@@ -459,9 +459,7 @@ vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
// an invalid bitstream and need to return an error.
uint8_t marker;
#if CONFIG_MISC_FIXES
size_t frame_sz_sum = 0;
#endif
assert(data_sz);
marker = read_marker(decrypt_cb, decrypt_state, data + data_sz - 1);
......@@ -470,7 +468,7 @@ vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
if ((marker & 0xe0) == 0xc0) {
const uint32_t frames = (marker & 0x7) + 1;
const uint32_t mag = ((marker >> 3) & 0x3) + 1;
const size_t index_sz = 2 + mag * (frames - CONFIG_MISC_FIXES);
const size_t index_sz = 2 + mag * (frames - 1);
// This chunk is marked as having a superframe index but doesn't have
// enough data for it, thus it's an invalid superframe index.
......@@ -501,20 +499,16 @@ vpx_codec_err_t vp10_parse_superframe_index(const uint8_t *data,
x = clear_buffer;
}
for (i = 0; i < frames - CONFIG_MISC_FIXES; ++i) {
for (i = 0; i < frames - 1; ++i) {
uint32_t this_sz = 0;
for (j = 0; j < mag; ++j)
this_sz |= (*x++) << (j * 8);
this_sz += CONFIG_MISC_FIXES;
this_sz += 1;
sizes[i] = this_sz;
#if CONFIG_MISC_FIXES
frame_sz_sum += this_sz;
#endif
}
#if CONFIG_MISC_FIXES
sizes[i] = data_sz - index_sz - frame_sz_sum;
#endif
*count = frames;
}
}
......
......@@ -164,11 +164,7 @@ static int decode_coefs(const MACROBLOCKD *xd,
val = CAT5_MIN_VAL + read_coeff(cat5_prob, 5, r);
break;
case CATEGORY6_TOKEN: {
#if CONFIG_MISC_FIXES
const int skip_bits = TX_SIZES - 1 - tx_size;
#else
const int skip_bits = 0;
#endif
const uint8_t *cat6p = cat6_prob + skip_bits;
#if CONFIG_VP9_HIGHBITDEPTH
switch (xd->bd) {
......
......@@ -23,13 +23,13 @@ static int inv_recenter_nonneg(int v, int m) {
static int decode_uniform(vpx_reader *r) {
const int l = 8;
const int m = (1 << l) - 191 + CONFIG_MISC_FIXES;
const int m = (1 << l) - 192;
const int v = vpx_read_literal(r, l - 1);
return v < m ? v : (v << 1) - m + vpx_read_bit(r);
}
static int inv_remap_prob(int v, int m) {
static uint8_t inv_map_table[MAX_PROB - CONFIG_MISC_FIXES] = {
static uint8_t inv_map_table[MAX_PROB - 1] = {
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176, 189,
202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 27,
......@@ -47,9 +47,6 @@ static int inv_remap_prob(int v, int m) {
207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221, 222,
223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253,
#if !CONFIG_MISC_FIXES
253
#endif
};
assert(v < (int)(sizeof(inv_map_table) / sizeof(inv_map_table[0])));
v = inv_map_table[v];
......
This diff is collapsed.
......@@ -2818,15 +2818,9 @@ static void encode_frame_internal(VP10_COMP *cpi) {
vp10_zero(rdc->filter_diff);
for (i = 0; i < (cm->seg.enabled ? MAX_SEGMENTS : 1); ++i) {
#if CONFIG_MISC_FIXES
const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
#endif
xd->lossless[i] = cm->y_dc_delta_q == 0 &&
#if CONFIG_MISC_FIXES
qindex == 0 &&
#else
cm->base_qindex == 0 &&
#endif
cm->uv_dc_delta_q == 0 &&
cm->uv_ac_delta_q == 0;
}
......
......@@ -137,19 +137,8 @@ static void build_nmv_component_cost_table(int *mvcost,
static void update_mv(vpx_writer *w, const unsigned int ct[2], vpx_prob *cur_p,
vpx_prob upd_p) {
#if CONFIG_MISC_FIXES
(void) upd_p;
vp10_cond_prob_diff_update(w, cur_p, ct);
#else
const vpx_prob new_p = get_binary_prob(ct[0], ct[1]) | 1;
const int update = cost_branch256(ct, *cur_p) + vp10_cost_zero(upd_p) >
cost_branch256(ct, new_p) + vp10_cost_one(upd_p) + 7 * 256;
vpx_write(w, update, upd_p);
if (update) {
*cur_p = new_p;
vpx_write_literal(w, new_p >> 1, 7);
}
#endif
}
static void write_mv_update(const vpx_tree_index *tree,
......
......@@ -419,10 +419,6 @@ static void save_coding_context(VP10_COMP *cpi) {
memcpy(cc->nmvcosts_hp[1], cpi->nmvcosts_hp[1],
MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
#if !CONFIG_MISC_FIXES
vp10_copy(cc->segment_pred_probs, cm->segp.pred_probs);
#endif
memcpy(cpi->coding_context.last_frame_seg_map_copy,
cm->last_frame_seg_map, (cm->mi_rows * cm->mi_cols));
......@@ -447,10 +443,6 @@ static void restore_coding_context(VP10_COMP *cpi) {
memcpy(cpi->nmvcosts_hp[1], cc->nmvcosts_hp[1],
MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
#if !CONFIG_MISC_FIXES
vp10_copy(cm->segp.pred_probs, cc->segment_pred_probs);
#endif
memcpy(cm->last_frame_seg_map,
cpi->coding_context.last_frame_seg_map_copy,
(cm->mi_rows * cm->mi_cols));
......@@ -3677,12 +3669,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi,
if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
vp10_adapt_coef_probs(cm);
#if CONFIG_MISC_FIXES
vp10_adapt_intra_frame_probs(cm);
#else
if (!frame_is_intra_only(cm))
vp10_adapt_intra_frame_probs(cm);
#endif
}
if (!frame_is_intra_only(cm)) {
......
......@@ -55,10 +55,6 @@ typedef struct {
int nmvcosts[2][MV_VALS];
int nmvcosts_hp[2][MV_VALS];
#if !CONFIG_MISC_FIXES
vpx_prob segment_pred_probs[PREDICTION_PROBS];
#endif
unsigned char *last_frame_seg_map_copy;
// 0 = Intra, Last, GF, ARF
......
......@@ -963,11 +963,10 @@ static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
assert(bs == xd->mi[0]->mbmi.sb_type);