Commit d6bdd46b authored by Yue Chen

rect_tx_ext: work with var_tx

Change-Id: Ie2c34490dc50cb242bcd701308e6b55243883b15
parent 748d570e
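
For orientation: the recurring edit in the hunks below replaces build gates of the form CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT with CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX), so the quarter-size rectangular-transform code also compiles in var_tx builds that do not enable ext_tx. A minimal sketch of the pattern, illustrative only; the real guards wrap lookup tables, probabilities, transform kernels, and bitstream code:

/* Before this commit: quarter-size rect tx only built on top of ext_tx. */
/* #if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT */

/* After this commit: also built when var_tx is enabled, which is what
 * lets rect_tx_ext "work with var_tx". */
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
/* ... quarter_txsize_lookup, quarter_tx_size_prob, the 4x16 / 16x4 /
 * 8x32 / 32x8 transform kernels, and the signalling code ... */
#endif
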
......@@ -1085,8 +1085,10 @@ static INLINE int is_rect_tx_allowed(const MACROBLOCKD *xd,
return is_rect_tx_allowed_bsize(mbmi->sb_type) &&
!xd->lossless[mbmi->segment_id];
}
#endif // CONFIG_RECT_TX
#endif // CONFIG_EXT_TX
#if CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
static INLINE int is_quarter_tx_allowed_bsize(BLOCK_SIZE bsize) {
static const char LUT_QTTX[BLOCK_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2 || CONFIG_CHROMA_SUB8X8
......@@ -1127,9 +1129,7 @@ static INLINE int is_quarter_tx_allowed(const MACROBLOCKD *xd,
return is_quarter_tx_allowed_bsize(mbmi->sb_type) && is_inter &&
!xd->lossless[mbmi->segment_id];
}
#endif // CONFIG_RECT_TX_EXT
#endif // CONFIG_RECT_TX
#endif // CONFIG_EXT_TX
#endif
static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode,
int is_inter) {
......
......@@ -655,13 +655,20 @@ static const TX_SIZE max_txsize_rect_lookup[BLOCK_SIZES_ALL] = {
TX_32X32, TX_32X32, TX_32X32,
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_TX64X64
#if CONFIG_RECT_TX_EXT
// 4x16, 16x4, 8x32
TX_4X16, TX_16X4, TX_8X32,
// 32x8
TX_32X8
#else
// 4x16, 16x4, 8x32
TX_4X8, TX_8X4, TX_8X16,
// 32x8
TX_16X8
#endif
};
#if CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT
static const TX_SIZE quarter_txsize_lookup[BLOCK_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2 || CONFIG_CHROMA_SUB8X8
// 2X2, 2X4, 4X2,
......@@ -686,7 +693,7 @@ static const TX_SIZE quarter_txsize_lookup[BLOCK_SIZES_ALL] = {
// 32x8
TX_32X8
};
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
#endif
#else
#define max_txsize_rect_lookup max_txsize_lookup
#endif // CONFIG_RECT_TX && (CONFIG_EXT_TX || CONFIG_VAR_TX)
......
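
The net effect of the new max_txsize_rect_lookup branch above: with rect_tx_ext, a 1:4 or 4:1 block can be covered by a single transform of its own shape instead of two half-size rectangles, and, as the quarter_txsize_lookup tail shows, the quarter and maximal sizes coincide for these shapes, which the signalling code later exploits. A tiny arithmetic check of the coverage claim for a 4x16 block:

#include <assert.h>

int main(void) {
  const int blk_w = 4, blk_h = 16;          /* BLOCK_4X16, in pixels */
  assert((blk_w * blk_h) / (4 * 8) == 2);   /* old table: two TX_4X8 transforms */
  assert((blk_w * blk_h) / (4 * 16) == 1);  /* new table: one TX_4X16 transform */
  return 0;
}
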
......@@ -315,7 +315,7 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
*(const uint64_t *)(l + 16) | *(const uint64_t *)(l + 24));
break;
#endif // CONFIG_TX64X64
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
case TX_4X16:
above_ec = !!*(const uint16_t *)a;
left_ec = !!*(const uint64_t *)l;
......@@ -332,7 +332,7 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
above_ec = !!(*(const uint64_t *)a | *(const uint64_t *)(a + 8));
left_ec = !!*(const uint32_t *)l;
break;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
default: assert(0 && "Invalid transform size."); break;
}
return combine_entropy_contexts(above_ec, left_ec);
......@@ -385,7 +385,7 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
left_ec = !!(*(const uint64_t *)l | *(const uint64_t *)(l + 8));
break;
#endif // CONFIG_TX64X64
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
case TX_4X16:
above_ec = a[0] != 0;
left_ec = !!*(const uint32_t *)l;
......@@ -402,7 +402,7 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
above_ec = !!*(const uint64_t *)a;
left_ec = !!*(const uint16_t *)l;
break;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
default: assert(0 && "Invalid transform size."); break;
}
return combine_entropy_contexts(above_ec, left_ec);
......
......@@ -1737,9 +1737,9 @@ static const aom_prob default_tx_size_prob[MAX_TX_DEPTH][TX_SIZE_CONTEXTS]
#endif // CONFIG_TX64X64
};
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
static const aom_prob default_quarter_tx_size_prob = 192;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_LOOP_RESTORATION
const aom_tree_index
......@@ -4952,9 +4952,9 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
av1_copy(fc->comp_inter_mode_prob, default_comp_inter_mode_p);
#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
av1_copy(fc->tx_size_probs, default_tx_size_prob);
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
fc->quarter_tx_size_prob = default_quarter_tx_size_prob;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_VAR_TX
av1_copy(fc->txfm_partition_prob, default_txfm_partition_probs);
#if CONFIG_NEW_MULTISYMBOL
......@@ -5231,10 +5231,10 @@ void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
aom_tree_merge_probs(av1_tx_size_tree[i], pre_fc->tx_size_probs[i][j],
counts->tx_size[i][j], fc->tx_size_probs[i][j]);
}
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
fc->quarter_tx_size_prob = av1_mode_mv_merge_probs(
pre_fc->quarter_tx_size_prob, counts->quarter_tx_size);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
}
#if CONFIG_VAR_TX
......
......@@ -277,9 +277,9 @@ typedef struct frame_contexts {
aom_prob comp_inter_mode_prob[COMP_INTER_MODE_CONTEXTS];
#endif // CONFIG_EXT_INTER && CONFIG_COMPOUND_SINGLEREF
aom_prob tx_size_probs[MAX_TX_DEPTH][TX_SIZE_CONTEXTS][MAX_TX_DEPTH];
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
aom_prob quarter_tx_size_prob;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_VAR_TX
aom_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
#if CONFIG_NEW_MULTISYMBOL
......@@ -467,9 +467,9 @@ typedef struct FRAME_COUNTS {
// belong into this structure.
unsigned int tx_size_totals[TX_SIZES];
unsigned int tx_size[MAX_TX_DEPTH][TX_SIZE_CONTEXTS][MAX_TX_DEPTH + 1];
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
unsigned int quarter_tx_size[2];
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_VAR_TX
unsigned int txfm_partition[TXFM_PARTITION_CONTEXTS][2];
#endif
......
......@@ -1623,7 +1623,7 @@ static void inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
}
// These will be used by the masked-tx experiment in the future.
#if CONFIG_RECT_TX && CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
static void inv_txfm_add_4x16(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
#if CONFIG_LGT
......@@ -1659,7 +1659,7 @@ static void inv_txfm_add_32x8(const tran_low_t *input, uint8_t *dest,
av1_iht32x8_256_add(input, dest, stride, txfm_param);
#endif
}
#endif // CONFIG_RECT_TX && CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
#endif
static void inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
int stride, const TxfmParam *txfm_param) {
......@@ -2146,7 +2146,7 @@ void av1_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
#if CONFIG_CHROMA_2X2
case TX_2X2: inv_txfm_add_2x2(input, dest, stride, txfm_param); break;
#endif
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
case TX_32X8: inv_txfm_add_32x8(input, dest, stride, txfm_param); break;
case TX_8X32: inv_txfm_add_8x32(input, dest, stride, txfm_param); break;
case TX_16X4: inv_txfm_add_16x4(input, dest, stride, txfm_param); break;
......
......@@ -723,17 +723,33 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
pd->dst.stride, max_scan_line, eob);
*eob_total += eob;
} else {
#if CONFIG_RECT_TX_EXT
int is_qttx = plane_tx_size == quarter_txsize_lookup[plane_bsize];
const TX_SIZE sub_txs = is_qttx ? plane_tx_size : sub_tx_size_map[tx_size];
if (is_qttx) assert(blk_row == 0 && blk_col == 0 && block == 0);
#else
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
assert(sub_txs < tx_size);
#endif
const int bsl = tx_size_wide_unit[sub_txs];
int sub_step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
assert(sub_txs < tx_size);
int i;
assert(bsl > 0);
for (i = 0; i < 4; ++i) {
#if CONFIG_RECT_TX_EXT
int is_wide_tx = tx_size_wide_unit[sub_txs] > tx_size_high_unit[sub_txs];
const int offsetr =
is_qttx ? (is_wide_tx ? i * tx_size_high_unit[sub_txs] : 0)
: blk_row + ((i >> 1) * bsl);
const int offsetc =
is_qttx ? (is_wide_tx ? 0 : i * tx_size_wide_unit[sub_txs])
: blk_col + (i & 0x01) * bsl;
#else
const int offsetr = blk_row + (i >> 1) * bsl;
const int offsetc = blk_col + (i & 0x01) * bsl;
#endif
if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
......@@ -4961,10 +4977,10 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
}
#endif
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (cm->tx_mode == TX_MODE_SELECT)
av1_diff_update_prob(&r, &fc->quarter_tx_size_prob, ACCT_STR);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_LV_MAP
av1_read_txb_probs(fc, cm->tx_mode, &r);
......
......@@ -543,16 +543,22 @@ static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int is_inter,
#if CONFIG_RECT_TX && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (coded_tx_size > max_txsize_lookup[bsize]) {
assert(coded_tx_size == max_txsize_lookup[bsize] + 1);
#if CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT
if (is_quarter_tx_allowed(xd, &xd->mi[0]->mbmi, is_inter)) {
int quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
int quarter_tx;
if (counts) ++counts->quarter_tx_size[quarter_tx];
if (quarter_txsize_lookup[bsize] != max_txsize_lookup[bsize]) {
quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
FRAME_COUNTS *counts = xd->counts;
if (counts) ++counts->quarter_tx_size[quarter_tx];
} else {
quarter_tx = 1;
}
return quarter_tx ? quarter_txsize_lookup[bsize]
: max_txsize_rect_lookup[bsize];
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX_EXT
#endif // CONFIG_RECT_TX_EXT
return max_txsize_rect_lookup[bsize];
}
......@@ -2860,6 +2866,26 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi,
for (idx = 0; idx < width; idx += bw)
read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size,
height != width, idy, idx, r);
#if CONFIG_RECT_TX_EXT
if (is_quarter_tx_allowed(xd, mbmi, inter_block) &&
mbmi->tx_size == max_tx_size) {
int quarter_tx;
if (quarter_txsize_lookup[bsize] != max_tx_size) {
quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR);
if (xd->counts) ++xd->counts->quarter_tx_size[quarter_tx];
} else {
quarter_tx = 1;
}
if (quarter_tx) {
mbmi->tx_size = quarter_txsize_lookup[bsize];
for (idy = 0; idy < tx_size_high_unit[max_tx_size] / 2; ++idy)
for (idx = 0; idx < tx_size_wide_unit[max_tx_size] / 2; ++idx)
mbmi->inter_tx_size[idy][idx] = mbmi->tx_size;
mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size);
}
}
#endif
} else {
mbmi->tx_size = read_tx_size(cm, xd, inter_block, !mbmi->skip, r);
......
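
Decoder-side summary of the two hunks above: in both the non-vartx path (read_tx_size) and the vartx path (read_inter_frame_mode_info), the quarter-tx flag is read with cm->fc->quarter_tx_size_prob only when is_quarter_tx_allowed() holds and quarter_txsize_lookup[bsize] differs from the maximal transform size used in that path; when the two coincide, as for the 1:4 and 4:1 blocks, the quarter transform is simply implied. In the vartx path a selected quarter transform is then copied into the inter_tx_size grid. A condensed, self-contained sketch of the shared decision; demo_* names are placeholders and coded_bit stands in for the aom_read() call:

#include <assert.h>

/* Condensed form of the decision shared by read_tx_size and the
 * CONFIG_RECT_TX_EXT branch of read_inter_frame_mode_info.  Sizes are
 * plain ints here rather than TX_SIZE values. */
static int demo_choose_tx_size(int quarter_allowed, int quarter_size,
                               int max_rect_size, int coded_bit) {
  if (!quarter_allowed) return max_rect_size;
  int quarter_tx;
  if (quarter_size != max_rect_size)
    quarter_tx = coded_bit; /* one bit, coded with quarter_tx_size_prob */
  else
    quarter_tx = 1;         /* sizes coincide: quarter transform implied */
  return quarter_tx ? quarter_size : max_rect_size;
}

int main(void) {
  assert(demo_choose_tx_size(1, 3, 5, 1) == 3); /* bit selects quarter */
  assert(demo_choose_tx_size(1, 3, 5, 0) == 5); /* bit selects max rect */
  assert(demo_choose_tx_size(1, 7, 7, 0) == 7); /* implied quarter */
  assert(demo_choose_tx_size(0, 3, 5, 1) == 5); /* quarter not allowed */
  return 0;
}
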
......@@ -355,7 +355,12 @@ static void write_tx_size_vartx(const AV1_COMMON *cm, MACROBLOCKD *xd,
return;
}
#if CONFIG_RECT_TX_EXT
if (tx_size == mbmi->inter_tx_size[tx_row][tx_col] ||
mbmi->tx_size == quarter_txsize_lookup[mbmi->sb_type]) {
#else
if (tx_size == mbmi->inter_tx_size[tx_row][tx_col]) {
#endif
#if CONFIG_NEW_MULTISYMBOL
aom_write_symbol(w, 0, ec_ctx->txfm_partition_cdf[ctx], 2);
#else
......@@ -364,6 +369,7 @@ static void write_tx_size_vartx(const AV1_COMMON *cm, MACROBLOCKD *xd,
txfm_partition_update(xd->above_txfm_context + blk_col,
xd->left_txfm_context + blk_row, tx_size, tx_size);
// TODO(yuec): set correct txfm partition update for qttx
} else {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
const int bsl = tx_size_wide_unit[sub_txs];
......@@ -427,11 +433,11 @@ static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
aom_write_symbol(w, depth, ec_ctx->tx_size_cdf[tx_size_cat][tx_size_ctx],
tx_size_cat + 2);
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (is_quarter_tx_allowed(xd, mbmi, is_inter) && tx_size != coded_tx_size)
aom_write(w, tx_size == quarter_txsize_lookup[bsize],
cm->fc->quarter_tx_size_prob);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
}
}
......@@ -963,15 +969,30 @@ static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
token_stats->cost += tmp_token_stats.cost;
#endif
} else {
#if CONFIG_RECT_TX_EXT
int is_qttx = plane_tx_size == quarter_txsize_lookup[plane_bsize];
const TX_SIZE sub_txs = is_qttx ? plane_tx_size : sub_tx_size_map[tx_size];
#else
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
#endif
const int bsl = tx_size_wide_unit[sub_txs];
int i;
assert(bsl > 0);
for (i = 0; i < 4; ++i) {
#if CONFIG_RECT_TX_EXT
int is_wide_tx = tx_size_wide_unit[sub_txs] > tx_size_high_unit[sub_txs];
const int offsetr =
is_qttx ? (is_wide_tx ? i * tx_size_high_unit[sub_txs] : 0)
: blk_row + (i >> 1) * bsl;
const int offsetc =
is_qttx ? (is_wide_tx ? 0 : i * tx_size_wide_unit[sub_txs])
: blk_col + (i & 0x01) * bsl;
#else
const int offsetr = blk_row + (i >> 1) * bsl;
const int offsetc = blk_col + (i & 0x01) * bsl;
#endif
const int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
......@@ -1748,6 +1769,15 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row,
for (idx = 0; idx < width; idx += bw)
write_tx_size_vartx(cm, xd, mbmi, max_tx_size, height != width, idy,
idx, w);
#if CONFIG_RECT_TX_EXT
if (is_quarter_tx_allowed(xd, mbmi, is_inter_block(mbmi)) &&
quarter_txsize_lookup[bsize] != max_tx_size &&
(mbmi->tx_size == quarter_txsize_lookup[bsize] ||
mbmi->tx_size == max_tx_size)) {
aom_write(w, mbmi->tx_size != max_tx_size,
cm->fc->quarter_tx_size_prob);
}
#endif
} else {
set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, skip, xd);
write_selected_tx_size(cm, xd, w);
......@@ -4551,11 +4581,11 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
#if CONFIG_LOOP_RESTORATION
encode_restoration(cm, header_bc);
#endif // CONFIG_LOOP_RESTORATION
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (cm->tx_mode == TX_MODE_SELECT)
av1_cond_prob_diff_update(header_bc, &cm->fc->quarter_tx_size_prob,
cm->counts.quarter_tx_size, probwt);
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_LV_MAP
av1_write_txb_probs(cpi, header_bc);
#endif // CONFIG_LV_MAP
......
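
The bitstream writer mirrors the decoder: write_selected_tx_size sends tx_size == quarter_txsize_lookup[bsize] when a quarter transform is allowed and the coded size differs from the maximum, and pack_inter_mode_mvs appends one bit (mbmi->tx_size != max_tx_size) after the vartx tree under the same not-equal guard. A tiny round-trip sketch of that one-bit mapping, with plain ints in place of TX_SIZE values and no arithmetic coder:

#include <assert.h>

/* Encoder side: 1 means "the quarter size was chosen". */
static int demo_write_quarter_bit(int chosen_tx, int max_rect_size) {
  return chosen_tx != max_rect_size;
}

/* Decoder side: map the bit back to a transform size. */
static int demo_read_quarter_bit(int bit, int quarter_size,
                                 int max_rect_size) {
  return bit ? quarter_size : max_rect_size;
}

int main(void) {
  const int quarter = 3, max_rect = 5; /* illustrative ids */
  const int choices[2] = { quarter, max_rect };
  for (int i = 0; i < 2; ++i) {
    const int bit = demo_write_quarter_bit(choices[i], max_rect);
    assert(demo_read_quarter_bit(bit, quarter, max_rect) == choices[i]);
  }
  return 0;
}
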
......@@ -5474,10 +5474,15 @@ void av1_encode_frame(AV1_COMP *cpi) {
#endif // CONFIG_EXT_INTER
#if CONFIG_VAR_TX
#if CONFIG_RECT_TX_EXT
if (cm->tx_mode == TX_MODE_SELECT && cpi->td.mb.txb_split_count == 0 &&
counts->quarter_tx_size[1] == 0)
#else
if (cm->tx_mode == TX_MODE_SELECT && cpi->td.mb.txb_split_count == 0)
#endif
cm->tx_mode = ALLOW_32X32 + CONFIG_TX64X64;
#else
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && CONFIG_EXT_TX
if (cm->tx_mode == TX_MODE_SELECT && counts->quarter_tx_size[1] == 0) {
#else
if (cm->tx_mode == TX_MODE_SELECT) {
......@@ -5766,9 +5771,17 @@ static void update_txfm_count(MACROBLOCK *x, MACROBLOCKD *xd,
if (blk_row >= max_blocks_high || blk_col >= max_blocks_wide) return;
#if CONFIG_RECT_TX_EXT
if (tx_size == plane_tx_size ||
mbmi->tx_size == quarter_txsize_lookup[mbmi->sb_type]) {
#else
if (tx_size == plane_tx_size) {
#endif
++counts->txfm_partition[ctx][0];
mbmi->tx_size = tx_size;
#if CONFIG_RECT_TX_EXT
if (tx_size == plane_tx_size)
#endif
mbmi->tx_size = tx_size;
txfm_partition_update(xd->above_txfm_context + blk_col,
xd->left_txfm_context + blk_row, tx_size, tx_size);
} else {
......@@ -6082,13 +6095,16 @@ static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td,
++td->counts->tx_size[tx_size_cat][tx_size_ctx][depth];
#endif
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
if (is_quarter_tx_allowed(xd, mbmi, is_inter) &&
mbmi->tx_size != txsize_sqr_up_map[mbmi->tx_size]) {
++td->counts->quarter_tx_size[mbmi->tx_size ==
quarter_txsize_lookup[mbmi->sb_type]];
quarter_txsize_lookup[bsize] != max_txsize_rect_lookup[bsize] &&
(mbmi->tx_size == quarter_txsize_lookup[bsize] ||
mbmi->tx_size == max_txsize_rect_lookup[bsize])) {
++td->counts
->quarter_tx_size[mbmi->tx_size == quarter_txsize_lookup[bsize]];
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_EXT_TX && CONFIG_RECT_TX
assert(IMPLIES(is_rect_tx(tx_size), is_rect_tx_allowed(xd, mbmi)));
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX
......
......@@ -797,16 +797,32 @@ static void encode_block_inter(int plane, int block, int blk_row, int blk_col,
encode_block(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
} else {
assert(tx_size < TX_SIZES_ALL);
#if CONFIG_RECT_TX_EXT
int is_qttx = plane_tx_size == quarter_txsize_lookup[plane_bsize];
const TX_SIZE sub_txs = is_qttx ? plane_tx_size : sub_tx_size_map[tx_size];
if (is_qttx) assert(blk_row == 0 && blk_col == 0 && block == 0);
#else
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
assert(sub_txs < tx_size);
#endif
// This is the square transform block partition entry point.
int bsl = tx_size_wide_unit[sub_txs];
int i;
assert(bsl > 0);
for (i = 0; i < 4; ++i) {
#if CONFIG_RECT_TX_EXT
int is_wide_tx = tx_size_wide_unit[sub_txs] > tx_size_high_unit[sub_txs];
const int offsetr =
is_qttx ? (is_wide_tx ? i * tx_size_high_unit[sub_txs] : 0)
: blk_row + ((i >> 1) * bsl);
const int offsetc =
is_qttx ? (is_wide_tx ? 0 : i * tx_size_wide_unit[sub_txs])
: blk_col + ((i & 0x01) * bsl);
#else
const int offsetr = blk_row + ((i >> 1) * bsl);
const int offsetc = blk_col + ((i & 0x01) * bsl);
#endif
int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
if (offsetr >= max_blocks_high || offsetc >= max_blocks_wide) continue;
......
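
The same quarter-tx block partitioning appears in four places in this diff (decode_reconstruct_tx, pack_txb_tokens, encode_block_inter, tokenize_vartx): instead of the usual split into sub_tx_size_map[tx_size] quadrants, a quarter-size transform is applied four times along the block's longer axis, stacked in rows when the transform is wider than tall and placed side by side otherwise, with the qttx path asserting blk_row == blk_col == 0. A standalone sketch of that offset arithmetic; the unit sizes below (an 8x2-unit quarter transform inside an 8x8-unit block) are illustrative, not taken from the lookup tables:

#include <assert.h>
#include <stdio.h>

int main(void) {
  /* Quarter transform size in 4x4 units (illustrative numbers). */
  const int sub_wide = 8, sub_high = 2;
  const int is_wide_tx = sub_wide > sub_high;

  for (int i = 0; i < 4; ++i) {
    /* Mirrors the CONFIG_RECT_TX_EXT branches above: a wide quarter tx
     * steps down by its height, a tall one steps right by its width. */
    const int offsetr = is_wide_tx ? i * sub_high : 0;
    const int offsetc = is_wide_tx ? 0 : i * sub_wide;
    printf("sub-tx %d at unit offset (row %d, col %d)\n", i, offsetr, offsetc);
  }

  /* Four 8x2 tiles at rows 0, 2, 4 and 6 exactly cover the 8x8-unit block. */
  assert(4 * (is_wide_tx ? sub_high : sub_wide) == 8);
  return 0;
}
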
......@@ -143,7 +143,7 @@ static void fwd_txfm_64x64(const int16_t *src_diff, tran_low_t *coeff,
}
#endif // CONFIG_TX64X64
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
static void fwd_txfm_16x4(const int16_t *src_diff, tran_low_t *coeff,
int diff_stride, TxfmParam *txfm_param) {
#if CONFIG_LGT
......@@ -179,7 +179,7 @@ static void fwd_txfm_8x32(const int16_t *src_diff, tran_low_t *coeff,
av1_fht8x32(src_diff, coeff, diff_stride, txfm_param);
#endif
}
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
#if CONFIG_CHROMA_2X2
static void highbd_fwd_txfm_2x2(const int16_t *src_diff, tran_low_t *coeff,
......@@ -483,7 +483,7 @@ void av1_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
#if CONFIG_CHROMA_2X2
case TX_2X2: fwd_txfm_2x2(src_diff, coeff, diff_stride, txfm_param); break;
#endif
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
case TX_4X16:
fwd_txfm_4x16(src_diff, coeff, diff_stride, txfm_param);
break;
......@@ -496,7 +496,7 @@ void av1_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
case TX_32X8:
fwd_txfm_32x8(src_diff, coeff, diff_stride, txfm_param);
break;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
default: assert(0); break;
}
}
......
......@@ -669,7 +669,7 @@ static void get_entropy_contexts_plane(
for (i = 0; i < num_4x4_h; i += 8)
t_left[i] = !!*(const uint64_t *)&left[i];
break;
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
case TX_4X16:
for (i = 0; i < num_4x4_w; i += 2)
t_above[i] = !!*(const uint16_t *)&above[i];
......@@ -696,7 +696,7 @@ static void get_entropy_contexts_plane(
for (i = 0; i < num_4x4_h; i += 4)
t_left[i] = !!*(const uint32_t *)&left[i];
break;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
default: assert(0 && "Invalid transform size."); break;
}
......@@ -770,7 +770,7 @@ static void get_entropy_contexts_plane(
for (i = 0; i < num_4x4_h; i += 4)
t_left[i] = !!*(const uint32_t *)&left[i];
break;
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
case TX_4X16:
memcpy(t_above, above, sizeof(ENTROPY_CONTEXT) * num_4x4_w);
for (i = 0; i < num_4x4_h; i += 4)
......@@ -793,7 +793,7 @@ static void get_entropy_contexts_plane(
for (i = 0; i < num_4x4_h; i += 2)
t_left[i] = !!*(const uint16_t *)&left[i];
break;
#endif // CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#endif
default: assert(0 && "Invalid transform size."); break;
}
}
......
......@@ -597,16 +597,31 @@ void tokenize_vartx(ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run,
cost_coeffs_b(plane, block, blk_row, blk_col, plane_bsize, tx_size, arg);
#endif
} else {
#if CONFIG_RECT_TX_EXT
int is_qttx = plane_tx_size == quarter_txsize_lookup[plane_bsize];
const TX_SIZE sub_txs = is_qttx ? plane_tx_size : sub_tx_size_map[tx_size];
#else
// Half the block size in transform block unit.
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
#endif
const int bsl = tx_size_wide_unit[sub_txs];
int i;
assert(bsl > 0);
for (i = 0; i < 4; ++i) {
#if CONFIG_RECT_TX_EXT
int is_wide_tx = tx_size_wide_unit[sub_txs] > tx_size_high_unit[sub_txs];
const int offsetr =
is_qttx ? (is_wide_tx ? i * tx_size_high_unit[sub_txs] : 0)
: blk_row + ((i >> 1) * bsl);
const int offsetc =
is_qttx ? (is_wide_tx ? 0 : i * tx_size_wide_unit[sub_txs])
: blk_col + ((i & 0x01) * bsl);
#else
const int offsetr = blk_row + ((i >> 1) * bsl);
const int offsetc = blk_col + ((i & 0x01) * bsl);
#endif
int step = tx_size_wide_unit[sub_txs] * tx_size_high_unit[sub_txs];
......
......@@ -555,6 +555,7 @@ post_process_cmdline() {
enabled ext_comp_refs && enable_feature var_refs
enabled ext_comp_refs && disable_feature one_sided_compound
enabled altref2 && enable_feature ext_refs
enabled rect_tx_ext && enable_feature rect_tx
if ! enabled delta_q && enabled ext_delta_q; then
log_echo "ext_delta_q requires delta_q, so disabling ext_delta_q"
......
......@@ -678,7 +678,7 @@ int main(int argc, const char **argv) {
/* Transform size */
// TODO(yuec): av1_tx_size_tree has variable sizes, so needs special handling
#if CONFIG_EXT_TX && CONFIG_RECT_TX && CONFIG_RECT_TX_EXT
#if CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)
cts_each_dim[0] = 2;
optimize_entropy_table(&fc.quarter_tx_size[0], probsfile, 1, cts_each_dim,
NULL, 1,
......