Commit fe45b216 authored by Jingning Han's avatar Jingning Han

Deprecated the separate handling of rectangular tx_size in var_tx

The recursive transform block partitioning system naturally supports
the use of rectangular transform block size. Hence there is no need
to make a separate coding route for the rectangular transform block
size support there.

Change-Id: I709b61f94cd4a6ea3f33c05abe319488c7d24f5a
parent 83ed6fe9
......@@ -980,12 +980,6 @@ static const aom_prob default_switchable_restore_prob[RESTORE_SWITCHABLE_TYPES -
1] = { 32, 85, 128 };
#endif // CONFIG_LOOP_RESTORATION
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_VAR_TX
// the probability of (0) using recursive square tx partition vs.
// (1) biggest rect tx for 4X8-8X4/8X16-16X8/16X32-32X16 blocks
static const aom_prob default_rect_tx_prob[TX_SIZES - 1] = { 192, 192, 192 };
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_VAR_TX
#if CONFIG_PALETTE
int av1_get_palette_color_context(const uint8_t *color_map, int cols, int r,
int c, int n, uint8_t *color_order,
......@@ -1470,9 +1464,6 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
av1_copy(fc->tx_size_probs, default_tx_size_prob);
#if CONFIG_VAR_TX
av1_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
av1_copy(fc->rect_tx_prob, default_rect_tx_prob);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#endif
av1_copy(fc->skip_probs, default_skip_probs);
#if CONFIG_REF_MV
......@@ -1705,15 +1696,6 @@ void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
}
#endif // CONFIG_EXT_INTER
#if CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
if (cm->tx_mode == TX_MODE_SELECT) {
for (i = 0; i < MAX_TX_DEPTH; ++i) {
fc->rect_tx_prob[i] =
av1_mode_mv_merge_probs(pre_fc->rect_tx_prob[i], counts->rect_tx[i]);
}
}
#endif // CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->y_mode_prob[i],
counts->y_mode[i], fc->y_mode_prob[i]);
......
......@@ -128,10 +128,6 @@ typedef struct frame_contexts {
aom_prob tx_size_probs[MAX_TX_DEPTH][TX_SIZE_CONTEXTS][MAX_TX_DEPTH];
#if CONFIG_VAR_TX
aom_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
#if CONFIG_EXT_TX && CONFIG_RECT_TX
// TODO(yuec) make this flag harmonize with the original syntax
aom_prob rect_tx_prob[TX_SIZES - 1];
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#endif
aom_prob skip_probs[SKIP_CONTEXTS];
#if CONFIG_REF_MV
......@@ -242,9 +238,6 @@ typedef struct FRAME_COUNTS {
unsigned int tx_size[MAX_TX_DEPTH][TX_SIZE_CONTEXTS][TX_SIZES];
#if CONFIG_VAR_TX
unsigned int txfm_partition[TXFM_PARTITION_CONTEXTS][2];
#if CONFIG_EXT_TX && CONFIG_RECT_TX
unsigned int rect_tx[TX_SIZES - 1][2];
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#endif
unsigned int skip[SKIP_CONTEXTS][2];
#if CONFIG_REF_MV
......
......@@ -1268,13 +1268,7 @@ void av1_filter_block_plane_non420_ver(AV1_COMMON *cm,
const int skip_this_r = skip_this && !block_edge_above;
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
TX_SIZE mb_tx_size = is_rect_tx(mbmi->tx_size)
? mbmi->tx_size
: mbmi->inter_tx_size[blk_row][blk_col];
#else
const TX_SIZE mb_tx_size = mbmi->inter_tx_size[blk_row][blk_col];
#endif
#endif
TX_SIZE tx_size = (plane->plane_type == PLANE_TYPE_UV)
......@@ -1310,24 +1304,13 @@ void av1_filter_block_plane_non420_ver(AV1_COMMON *cm,
tx_size_mask = 0;
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
tx_size_r =
AOMMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
tx_size_c = AOMMIN(txsize_vert_map[tx_size],
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
cm->above_txfm_context[mi_col + c] = txsize_horz_map[tx_size];
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] =
txsize_vert_map[tx_size];
#else
tx_size_r = AOMMIN(tx_size, cm->above_txfm_context[mi_col + c]);
tx_size_c =
AOMMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
cm->above_txfm_context[mi_col + c] = tx_size;
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] = tx_size;
#endif
#endif
#endif // CONFIG_VAR_TX
// Build masks based on the transform size of each block
// handle vertical mask
......@@ -1462,13 +1445,7 @@ void av1_filter_block_plane_non420_hor(AV1_COMMON *cm,
? get_uv_tx_size(mbmi, plane)
: mbmi->tx_size;
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
TX_SIZE mb_tx_size = is_rect_tx(mbmi->tx_size)
? mbmi->tx_size
: mbmi->inter_tx_size[blk_row][blk_col];
#else
TX_SIZE mb_tx_size = mbmi->inter_tx_size[blk_row][blk_col];
#endif
#endif
const int skip_border_4x4_c = ss_x && mi_col + c == cm->mi_cols - 1;
const int skip_border_4x4_r = ss_y && mi_row + r == cm->mi_rows - 1;
......@@ -1500,23 +1477,12 @@ void av1_filter_block_plane_non420_hor(AV1_COMMON *cm,
tx_size_mask = 0;
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
tx_size_r =
AOMMIN(txsize_horz_map[tx_size], cm->above_txfm_context[mi_col + c]);
tx_size_c = AOMMIN(txsize_vert_map[tx_size],
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
cm->above_txfm_context[mi_col + c] = txsize_horz_map[tx_size];
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] =
txsize_vert_map[tx_size];
#else
tx_size_r = AOMMIN(tx_size, cm->above_txfm_context[mi_col + c]);
tx_size_c =
AOMMIN(tx_size, cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK]);
cm->above_txfm_context[mi_col + c] = tx_size;
cm->left_txfm_context[(mi_row + r) & MAX_MIB_MASK] = tx_size;
#endif
#endif
// Build masks based on the transform size of each block
......
......@@ -552,7 +552,8 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
}
#endif // CONFIG_VAR_TX
#if !CONFIG_VAR_TX || CONFIG_SUPERTX || (CONFIG_EXT_TX && CONFIG_RECT_TX)
#if !CONFIG_VAR_TX || CONFIG_SUPERTX || \
(!CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX)
static int reconstruct_inter_block(AV1_COMMON *cm, MACROBLOCKD *const xd,
#if CONFIG_ANS
struct AnsDecoder *const r,
......@@ -1612,37 +1613,13 @@ static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
const TX_SIZE max_tx_size = max_txsize_rect_lookup[plane_bsize];
const int bh_var_tx = tx_size_high_unit[max_tx_size];
const int bw_var_tx = tx_size_wide_unit[max_tx_size];
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx(mbmi->tx_size)) {
const TX_SIZE tx_size =
plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
const int stepr = tx_size_high_unit[tx_size];
const int stepc = tx_size_wide_unit[tx_size];
int max_blocks_wide =
block_width +
(xd->mb_to_right_edge >= 0 ? 0 : xd->mb_to_right_edge >>
(3 + pd->subsampling_x));
int max_blocks_high =
block_height +
(xd->mb_to_bottom_edge >= 0 ? 0 : xd->mb_to_bottom_edge >>
(3 + pd->subsampling_y));
max_blocks_wide >>= tx_size_wide_log2[0];
max_blocks_high >>= tx_size_wide_log2[0];
for (row = 0; row < max_blocks_high; row += stepr)
for (col = 0; col < max_blocks_wide; col += stepc)
eobtotal += reconstruct_inter_block(cm, xd, r, mbmi->segment_id,
plane, row, col, tx_size);
} else {
#endif
block_width >>= tx_size_wide_log2[0];
block_height >>= tx_size_wide_log2[0];
for (row = 0; row < block_height; row += bh_var_tx)
for (col = 0; col < block_width; col += bw_var_tx)
decode_reconstruct_tx(cm, xd, r, mbmi, plane, plane_bsize, row,
col, max_tx_size, &eobtotal);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
}
#endif
block_width >>= tx_size_wide_log2[0];
block_height >>= tx_size_wide_log2[0];
for (row = 0; row < block_height; row += bh_var_tx)
for (col = 0; col < block_width; col += bw_var_tx)
decode_reconstruct_tx(cm, xd, r, mbmi, plane, plane_bsize, row, col,
max_tx_size, &eobtotal);
#else
const TX_SIZE tx_size =
plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
......@@ -4243,12 +4220,6 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
#if CONFIG_VAR_TX
for (k = 0; k < TXFM_PARTITION_CONTEXTS; ++k)
av1_diff_update_prob(&r, &fc->txfm_partition_prob[k], ACCT_STR);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (cm->tx_mode == TX_MODE_SELECT) {
for (i = 1; i < MAX_TX_DEPTH; ++i)
av1_diff_update_prob(&r, &fc->rect_tx_prob[i], ACCT_STR);
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#endif // CONFIG_VAR_TX
#endif // !CONFIG_PVQ
for (k = 0; k < SKIP_CONTEXTS; ++k)
......
......@@ -1855,31 +1855,12 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
const int width = block_size_wide[bsize] >> tx_size_wide_log2[0];
const int height = block_size_high[bsize] >> tx_size_wide_log2[0];
int idx, idy;
#if CONFIG_EXT_TX && CONFIG_RECT_TX
int is_rect_tx_allowed = inter_block && is_rect_tx_allowed_bsize(bsize) &&
!xd->lossless[mbmi->segment_id];
int use_rect_tx = 0;
int tx_size_cat = inter_tx_size_cat_lookup[bsize];
if (is_rect_tx_allowed) {
use_rect_tx = aom_read(r, cm->fc->rect_tx_prob[tx_size_cat], ACCT_STR);
if (xd->counts) {
++xd->counts->rect_tx[tx_size_cat][use_rect_tx];
}
}
if (use_rect_tx) {
mbmi->tx_size = max_txsize_rect_lookup[bsize];
set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, mbmi->skip, xd);
} else {
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
mbmi->min_tx_size = TX_SIZES_ALL;
for (idy = 0; idy < height; idy += bh)
for (idx = 0; idx < width; idx += bw)
read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size,
height != width, idy, idx, r);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
}
#endif
mbmi->min_tx_size = TX_SIZES_ALL;
for (idy = 0; idy < height; idy += bh)
for (idx = 0; idx < width; idx += bw)
read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size,
height != width, idy, idx, r);
} else {
if (inter_block)
mbmi->tx_size = read_tx_size_inter(cm, xd, !mbmi->skip, r);
......
......@@ -1287,26 +1287,10 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
const int width = num_4x4_blocks_wide_lookup[bsize];
const int height = num_4x4_blocks_high_lookup[bsize];
int idx, idy;
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx_allowed(xd, mbmi)) {
int tx_size_cat = inter_tx_size_cat_lookup[bsize];
aom_write(w, is_rect_tx(mbmi->tx_size),
cm->fc->rect_tx_prob[tx_size_cat]);
}
if (is_rect_tx(mbmi->tx_size)) {
set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
} else {
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
for (idy = 0; idy < height; idy += bh)
for (idx = 0; idx < width; idx += bw)
write_tx_size_vartx(cm, xd, mbmi, max_tx_size, height != width, idy,
idx, w);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
for (idy = 0; idy < height; idy += bh)
for (idx = 0; idx < width; idx += bw)
write_tx_size_vartx(cm, xd, mbmi, max_tx_size, height != width, idy,
idx, w);
} else {
set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
write_selected_tx_size(cm, xd, w);
......@@ -1976,21 +1960,10 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
const int num_4x4_h =
block_size_high[plane_bsize] >> tx_size_wide_log2[0];
int row, col;
#if CONFIG_EXT_TX && CONFIG_RECT_TX
TX_SIZE tx_size =
plane ? get_uv_tx_size(mbmi, &xd->plane[plane]) : mbmi->tx_size;
#endif
TOKEN_STATS token_stats;
init_token_stats(&token_stats);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_inter_block(mbmi) && !is_rect_tx(tx_size))
#else
if (is_inter_block(mbmi))
#endif
{
if (is_inter_block(mbmi)) {
const TX_SIZE max_tx_size = max_txsize_rect_lookup[plane_bsize];
int block = 0;
const int step =
......@@ -2011,7 +1984,7 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
dump_mode_info(m);
assert(0);
}
#endif
#endif // CONFIG_RD_DEBUG
} else {
TX_SIZE tx = plane ? get_uv_tx_size(&m->mbmi, &xd->plane[plane])
: m->mbmi.tx_size;
......@@ -2036,7 +2009,7 @@ static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
}
#else
(void)mbmi;
#endif
#endif // CONFIG_RD_DEBUG
#endif // CONFIG_VAR_TX
assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN);
......@@ -4155,13 +4128,6 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#endif
#if CONFIG_VAR_TX
update_txfm_partition_probs(cm, header_bc, counts, probwt);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (cm->tx_mode == TX_MODE_SELECT) {
for (i = 1; i < TX_SIZES - 1; ++i)
av1_cond_prob_diff_update(header_bc, &fc->rect_tx_prob[i],
counts->rect_tx[i], probwt);
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#endif
update_skip_probs(cm, header_bc, counts);
......
......@@ -5496,13 +5496,8 @@ static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
av1_encode_sb((AV1_COMMON *)cm, x, AOMMAX(bsize, BLOCK_8X8));
#if CONFIG_VAR_TX
if (mbmi->skip) mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx(mbmi->tx_size))
av1_tokenize_sb(cpi, td, t, dry_run, AOMMAX(bsize, BLOCK_8X8), rate);
else
#endif
av1_tokenize_sb_vartx(cpi, td, t, dry_run, mi_row, mi_col,
AOMMAX(bsize, BLOCK_8X8), rate);
av1_tokenize_sb_vartx(cpi, td, t, dry_run, mi_row, mi_col,
AOMMAX(bsize, BLOCK_8X8), rate);
#else
av1_tokenize_sb(cpi, td, t, dry_run, AOMMAX(bsize, BLOCK_8X8), rate);
#endif
......@@ -5526,23 +5521,13 @@ static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx_allowed(xd, mbmi)) {
td->counts->rect_tx[tx_size_cat][is_rect_tx(tx_size)]++;
}
if (!is_rect_tx_allowed(xd, mbmi) || !is_rect_tx(tx_size)) {
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_inter) {
tx_partition_count_update(cm, x, bsize, mi_row, mi_col, td->counts);
} else {
++td->counts->tx_size[tx_size_cat][tx_size_ctx][depth];
if (tx_size != max_txsize_lookup[bsize]) ++x->txb_split_count;
}
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_inter) {
tx_partition_count_update(cm, x, bsize, mi_row, mi_col, td->counts);
} else {
++td->counts->tx_size[tx_size_cat][tx_size_ctx][depth];
if (tx_size != max_txsize_lookup[bsize]) ++x->txb_split_count;
}
#endif
#endif
#if !CONFIG_VAR_TX
#else
++td->counts->tx_size[tx_size_cat][tx_size_ctx][depth];
#endif
} else {
......@@ -5608,37 +5593,18 @@ static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
#if CONFIG_VAR_TX
if (cm->tx_mode == TX_MODE_SELECT && mbmi->sb_type >= BLOCK_8X8 && is_inter &&
!(mbmi->skip || seg_skip)) {
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx(mbmi->tx_size)) {
set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, (mbmi->skip || seg_skip),
xd);
} else {
if (dry_run) tx_partition_set_contexts(cm, xd, bsize, mi_row, mi_col);
}
#else
if (dry_run) tx_partition_set_contexts(cm, xd, bsize, mi_row, mi_col);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
} else {
TX_SIZE tx_size = mbmi->tx_size;
// The new intra coding scheme requires no change of transform size
if (is_inter)
#if CONFIG_EXT_TX && CONFIG_RECT_TX
{
tx_size = AOMMIN(tx_mode_to_biggest_tx_size[cm->tx_mode],
max_txsize_lookup[bsize]);
if (txsize_sqr_map[max_txsize_rect_lookup[bsize]] <= tx_size)
tx_size = max_txsize_rect_lookup[bsize];
if (xd->lossless[mbmi->segment_id]) tx_size = TX_4X4;
}
#else
tx_size = tx_size_from_tx_mode(bsize, cm->tx_mode, is_inter);
#endif
else
tx_size = (bsize >= BLOCK_8X8) ? tx_size : TX_4X4;
mbmi->tx_size = tx_size;
set_txfm_ctxs(tx_size, xd->n8_w, xd->n8_h, (mbmi->skip || seg_skip), xd);
}
#endif
#endif // CONFIG_VAR_TX
}
#if CONFIG_SUPERTX
......
......@@ -1209,22 +1209,13 @@ void av1_encode_sb(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) {
arg.tl = ctx.tl[plane];
#if CONFIG_VAR_TX
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx(mbmi->tx_size)) {
av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
&arg);
} else {
#endif
for (idy = 0; idy < mi_height; idy += bh) {
for (idx = 0; idx < mi_width; idx += bw) {
encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
&arg);
block += step;
}
for (idy = 0; idy < mi_height; idy += bh) {
for (idx = 0; idx < mi_width; idx += bw) {
encode_block_inter(plane, block, idy, idx, plane_bsize, max_tx_size,
&arg);
block += step;
}
#if CONFIG_EXT_TX && CONFIG_RECT_TX
}
#endif
#else
av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
&arg);
......
......@@ -3259,7 +3259,6 @@ static void select_tx_block(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
int ctx =
txfm_partition_context(tx_above + (blk_col >> 1),
tx_left + (blk_row >> 1), mbmi->sb_type, tx_size);
int64_t sum_rd = INT64_MAX;
int tmp_eob = 0;
int zero_blk_rate;
......@@ -3267,6 +3266,7 @@ static void select_tx_block(const AV1_COMP *cpi, MACROBLOCK *x, int blk_row,
const int tx_size_ctx = txsize_sqr_map[tx_size];
av1_init_rd_stats(&sum_rd_stats);
assert(tx_size < TX_SIZES_ALL);
if (ref_best_rd < 0) {
......@@ -3455,57 +3455,6 @@ static int64_t select_tx_size_fix_type(const AV1_COMP *cpi, MACROBLOCK *x,
mbmi->tx_type = tx_type;
mbmi->min_tx_size = TX_SIZES_ALL;
inter_block_yrd(cpi, x, rd_stats, bsize, ref_best_rd, rd_stats_stack);
#if CONFIG_EXT_TX && CONFIG_RECT_TX
if (is_rect_tx_allowed(xd, mbmi)) {
RD_STATS rect_rd_stats;
int64_t rd_rect_tx;
int tx_size_cat = inter_tx_size_cat_lookup[bsize];
TX_SIZE tx_size = max_txsize_rect_lookup[bsize];
TX_SIZE var_tx_size = mbmi->tx_size;
txfm_rd_in_plane(x, cpi, &rect_rd_stats, ref_best_rd, 0, bsize, tx_size,
cpi->sf.use_fast_coef_costing);
if (rd_stats->rate != INT_MAX) {
rd_stats->rate += av1_cost_bit(cm->fc->rect_tx_prob[tx_size_cat], 0);
if (rd_stats->skip) {
rd = RDCOST(x->rdmult, x->rddiv, s1, rd_stats->sse);
} else {
rd = RDCOST(x->rdmult, x->rddiv, rd_stats->rate + s0, rd_stats->dist);
if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
!rd_stats->skip)
rd = AOMMIN(rd, RDCOST(x->rdmult, x->rddiv, s1, rd_stats->sse));
}
} else {
rd = INT64_MAX;
}
if (rect_rd_stats.rate != INT_MAX) {
rect_rd_stats.rate += av1_cost_bit(cm->fc->rect_tx_prob[tx_size_cat], 1);
if (rect_rd_stats.skip) {
rd_rect_tx = RDCOST(x->rdmult, x->rddiv, s1, rect_rd_stats.sse);
} else {
rd_rect_tx = RDCOST(x->rdmult, x->rddiv, rect_rd_stats.rate + s0,
rect_rd_stats.dist);
if (is_inter && !xd->lossless[xd->mi[0]->mbmi.segment_id] &&
!(rect_rd_stats.skip))
rd_rect_tx = AOMMIN(
rd_rect_tx, RDCOST(x->rdmult, x->rddiv, s1, rect_rd_stats.sse));
}
} else {
rd_rect_tx = INT64_MAX;
}
if (rd_rect_tx < rd) {
*rd_stats = rect_rd_stats;
if (!xd->lossless[mbmi->segment_id]) x->blk_skip[0][0] = rd_stats->skip;
mbmi->tx_size = tx_size;
mbmi->inter_tx_size[0][0] = mbmi->tx_size;
} else {
mbmi->tx_size = var_tx_size;
}
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
if (rd_stats->rate == INT_MAX) return INT64_MAX;
......@@ -10899,9 +10848,9 @@ void av1_rd_pick_inter_mode_sub8x8(const struct AV1_COMP *cpi,
// macroblock modes
*mbmi = best_mbmode;
#if CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX
#if CONFIG_VAR_TX
mbmi->inter_tx_size[0][0] = mbmi->tx_size;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
#endif
x->skip |= best_skip2;
if (!is_inter_block(&best_mbmode)) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment