Commit e4e18fcc authored by Debargha Mukherjee's avatar Debargha Mukherjee
Browse files

Enable 4:1 -> 2:1 -> 1:1 splits for 4:1 transforms

Also splits the sub_tx_size_map array into separate inter and
intra versions, in order to enable the new 4:1 transforms for
inter and intra blocks independently.
Includes refactoring such as removing the intra_tx_size_cat_lookup
array since it is unnecessary, and consolidating the
max_txsize_rect_lookup array for convenience.

Change-Id: I112553bab612dafb973611c87f36a43e1ac4be85
parent 4d8c6fc2
......@@ -944,10 +944,7 @@ static INLINE int is_rect_tx_allowed(const MACROBLOCKD *xd,
}
// Returns the largest (possibly rectangular) transform size allowed for the
// given block size, reading the inter (is_inter == 1) or intra
// (is_inter == 0) row of the consolidated 2-D lookup table.
// NOTE(review): the previous per-prediction-mode tables
// (max_txsize_rect_lookup / max_txsize_rect_intra_lookup) were merged into
// max_txsize_rect_lookup[2][BLOCK_SIZES_ALL]; the stale if/else body that
// made the new return unreachable has been removed.
static INLINE TX_SIZE get_max_rect_tx_size(BLOCK_SIZE bsize, int is_inter) {
return max_txsize_rect_lookup[is_inter][bsize];
}
static INLINE TX_SIZE tx_size_from_tx_mode(BLOCK_SIZE bsize, TX_MODE tx_mode,
......@@ -1102,18 +1099,31 @@ static INLINE int bsize_to_max_depth(BLOCK_SIZE bsize, int is_inter) {
int depth = 0;
while (depth < MAX_TX_DEPTH && tx_size != TX_4X4) {
depth++;
tx_size = sub_tx_size_map[tx_size];
tx_size = sub_tx_size_map[is_inter][tx_size];
}
return depth;
}
// Maps a block size to its transform-size category (used to index the
// tx_size_cdf tables) by counting how many times the block's maximum
// rectangular transform can be split before reaching TX_4X4, minus one.
// Replaces the removed intra_tx_size_cat_lookup table.
// Precondition: bsize must be large enough that its max transform is not
// TX_4X4 (i.e. at least 8x8 in one dimension), otherwise depth - 1 would
// be negative.
static INLINE int bsize_to_tx_size_cat(BLOCK_SIZE bsize, int is_inter) {
TX_SIZE tx_size = get_max_rect_tx_size(bsize, is_inter);
assert(tx_size != TX_4X4);
int depth = 0;
// Walk down the split chain (e.g. 32X32 -> 16X16 -> 8X8 -> 4X4) until the
// smallest transform is reached; each step is one category level.
while (tx_size != TX_4X4) {
depth++;
tx_size = sub_tx_size_map[is_inter][tx_size];
// Guard against a malformed sub_tx_size_map cycle that never reaches
// TX_4X4 (10 is a loose bound well above any real split depth).
assert(depth < 10);
}
assert(depth <= MAX_TX_CATS);
// Category 0 corresponds to an 8X8 maximum transform (depth == 1).
return depth - 1;
}
static INLINE int tx_size_to_depth(TX_SIZE tx_size, BLOCK_SIZE bsize,
int is_inter) {
TX_SIZE ctx_size = get_max_rect_tx_size(bsize, is_inter);
int depth = 0;
while (tx_size != ctx_size) {
depth++;
ctx_size = sub_tx_size_map[ctx_size];
ctx_size = sub_tx_size_map[is_inter][ctx_size];
assert(depth <= MAX_TX_DEPTH);
}
return depth;
......@@ -1123,7 +1133,7 @@ static INLINE TX_SIZE depth_to_tx_size(int depth, BLOCK_SIZE bsize,
int is_inter) {
TX_SIZE max_tx_size = get_max_rect_tx_size(bsize, is_inter);
TX_SIZE tx_size = max_tx_size;
for (int d = 0; d < depth; ++d) tx_size = sub_tx_size_map[tx_size];
for (int d = 0; d < depth; ++d) tx_size = sub_tx_size_map[is_inter][tx_size];
return tx_size;
}
......
......@@ -647,126 +647,128 @@ static const TX_SIZE max_txsize_lookup[BLOCK_SIZES_ALL] = {
#endif // CONFIG_EXT_PARTITION
};
static const TX_SIZE max_txsize_rect_intra_lookup[BLOCK_SIZES_ALL] = {
// 2X2, 2X4, 4X2,
TX_4X4, TX_4X4, TX_4X4,
// 4X4
TX_4X4,
// 4X8, 8X4, 8X8
TX_4X8, TX_8X4, TX_8X8,
// 8X16, 16X8, 16X16
TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
TX_16X32, TX_32X16, TX_32X32,
#if CONFIG_TX64X64
// 32X64, 64X32,
TX_32X64, TX_64X32,
// 64X64
TX_64X64,
static const TX_SIZE max_txsize_rect_lookup[2][BLOCK_SIZES_ALL] = {
{
// Intra
// 2X2, 2X4, 4X2,
TX_4X4, TX_4X4, TX_4X4,
// 4X4
TX_4X4,
// 4X8, 8X4, 8X8
TX_4X8, TX_8X4, TX_8X8,
// 8X16, 16X8, 16X16
TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
TX_16X32, TX_32X16, TX_32X32,
#if CONFIG_TX64X64
// 32X64, 64X32,
TX_32X64, TX_64X32,
// 64X64
TX_64X64,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
#endif // CONFIG_EXT_PARTITION
#else
// 32X64, 64X32,
TX_32X32, TX_32X32,
// 64X64
TX_32X32,
// 32X64, 64X32,
TX_32X32, TX_32X32,
// 64X64
TX_32X32,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_32X32, TX_32X32, TX_32X32,
// 64x128, 128x64, 128x128
TX_32X32, TX_32X32, TX_32X32,
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
// 4x16, 16x4,
TX_4X16, TX_16X4,
// 8x32, 32x8
TX_8X32, TX_32X8,
// 4x16, 16x4,
TX_4X16, TX_16X4,
// 8x32, 32x8
TX_8X32, TX_32X8,
#if CONFIG_TX64X64
// 16x64, 64x16
TX_16X64, TX_64X16,
// 16x64, 64x16
TX_16X64, TX_64X16,
#else
// 16x64, 64x16
TX_16X32, TX_32X16,
// 16x64, 64x16
TX_16X32, TX_32X16,
#endif // CONFIG_TX64X64
#else
// 4x16, 16x4,
TX_4X8, TX_8X4,
// 8x32, 32x8
TX_8X16, TX_16X8,
// 16x64, 64x16
TX_16X32, TX_32X16,
// 4x16, 16x4,
TX_4X8, TX_8X4,
// 8x32, 32x8
TX_8X16, TX_16X8,
// 16x64, 64x16
TX_16X32, TX_32X16,
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
#if CONFIG_EXT_PARTITION
#if CONFIG_TX64X64
// 32x128 128x32
TX_32X64, TX_64X32
// 32x128 128x32
TX_32X64, TX_64X32
#else
// 32x128 128x32
TX_32X32, TX_32X32
// 32x128 128x32
TX_32X32, TX_32X32
#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION
};
static const TX_SIZE max_txsize_rect_lookup[BLOCK_SIZES_ALL] = {
// 2X2, 2X4, 4X2,
TX_4X4, TX_4X4, TX_4X4,
// 4X4
TX_4X4,
// 4X8, 8X4, 8X8
TX_4X8, TX_8X4, TX_8X8,
// 8X16, 16X8, 16X16
TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
TX_16X32, TX_32X16, TX_32X32,
#if CONFIG_TX64X64
// 32X64, 64X32,
TX_32X64, TX_64X32,
// 64X64
TX_64X64,
}, {
// Inter
// 2X2, 2X4, 4X2,
TX_4X4, TX_4X4, TX_4X4,
// 4X4
TX_4X4,
// 4X8, 8X4, 8X8
TX_4X8, TX_8X4, TX_8X8,
// 8X16, 16X8, 16X16
TX_8X16, TX_16X8, TX_16X16,
// 16X32, 32X16, 32X32
TX_16X32, TX_32X16, TX_32X32,
#if CONFIG_TX64X64
// 32X64, 64X32,
TX_32X64, TX_64X32,
// 64X64
TX_64X64,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
// 64x128, 128x64, 128x128
TX_64X64, TX_64X64, TX_64X64,
#endif // CONFIG_EXT_PARTITION
#else
// 32X64, 64X32,
TX_32X32, TX_32X32,
// 64X64
TX_32X32,
// 32X64, 64X32,
TX_32X32, TX_32X32,
// 64X64
TX_32X32,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_32X32, TX_32X32, TX_32X32,
// 64x128, 128x64, 128x128
TX_32X32, TX_32X32, TX_32X32,
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
// 4x16, 16x4, 8x32
TX_4X16, TX_16X4, TX_8X32,
// 32x8
TX_32X8,
// 4x16, 16x4, 8x32
TX_4X16, TX_16X4, TX_8X32,
// 32x8
TX_32X8,
#if CONFIG_TX64X64
// 16x64, 64x16
TX_16X64, TX_64X16,
// 16x64, 64x16
TX_16X64, TX_64X16,
#else
// 16x64, 64x16
TX_16X32, TX_32X16,
// 16x64, 64x16
TX_16X32, TX_32X16,
#endif // CONFIG_TX64X64
#else
// 4x16, 16x4, 8x32
TX_4X8, TX_8X4, TX_8X16,
// 32x8
TX_16X8,
// 16x64, 64x16
TX_16X32, TX_32X16,
// 4x16, 16x4, 8x32
TX_4X8, TX_8X4, TX_8X16,
// 32x8
TX_16X8,
// 16x64, 64x16
TX_16X32, TX_32X16,
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
#if CONFIG_EXT_PARTITION
#if CONFIG_TX64X64
// 32x128 128x32
TX_32X64, TX_64X32
// 32x128 128x32
TX_32X64, TX_64X32
#else
// 32x128 128x32
TX_32X32, TX_32X32
// 32x128 128x32
TX_32X32, TX_32X32
#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION
},
};
static const TX_TYPE_1D vtx_tab[TX_TYPES] = {
......@@ -781,88 +783,89 @@ static const TX_TYPE_1D htx_tab[TX_TYPES] = {
IDTX_1D, DCT_1D, IDTX_1D, ADST_1D, IDTX_1D, FLIPADST_1D,
};
// Same as "max_txsize_lookup[bsize] - TX_8X8", except for rectangular
// block which may use a rectangular transform, in which case it is
// "(max_txsize_lookup[bsize] + 1) - TX_8X8", invalid for bsize < 8X8
#define TXSIZE_CAT_INVALID (-1)
static const int32_t intra_tx_size_cat_lookup[BLOCK_SIZES_ALL] = {
// 2X2, 2X4, 4X2,
TXSIZE_CAT_INVALID, TXSIZE_CAT_INVALID, TXSIZE_CAT_INVALID,
// 4X4,
TXSIZE_CAT_INVALID,
// 4X8, 8X4, 8X8,
TX_8X8 - TX_8X8, TX_8X8 - TX_8X8, TX_8X8 - TX_8X8,
// 8X16, 16X8, 16X16
TX_16X16 - TX_8X8, TX_16X16 - TX_8X8, TX_16X16 - TX_8X8,
// 16X32, 32X16, 32X32
TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
#if CONFIG_TX64X64
// 32X64, 64X32,
TX_64X64 - TX_8X8, TX_64X64 - TX_8X8,
// 64X64
TX_64X64 - TX_8X8,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_64X64 - TX_8X8, TX_64X64 - TX_8X8, TX_64X64 - TX_8X8,
#endif // CONFIG_EXT_PARTITION
#else
// 32X64, 64X32,
TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
// 64X64
TX_32X32 - TX_8X8,
#if CONFIG_EXT_PARTITION
// 64x128, 128x64, 128x128
TX_32X32 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
#endif // CONFIG_EXT_PARTITION
#endif // CONFIG_TX64X64
// TODO(david.barker): Change these if we support rectangular transforms
// for 4:1 shaped partitions
// 4x16, 16x4, 8x32
TX_8X8 - TX_8X8, TX_8X8 - TX_8X8, TX_16X16 - TX_8X8,
// 32x8, 16x64, 64x16
TX_16X16 - TX_8X8, TX_32X32 - TX_8X8, TX_32X32 - TX_8X8,
#if CONFIG_EXT_PARTITION
#if CONFIG_TX64X64
// 32x128, 128x32
TX_64X64 - TX_8X8, TX_64X64 - TX_8X8
#else
// 32x128, 128x32
TX_32X32 - TX_8X8, TX_32X32 - TX_8X8
#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION
};
#define inter_tx_size_cat_lookup intra_tx_size_cat_lookup
/* clang-format on */
static const TX_SIZE sub_tx_size_map[TX_SIZES_ALL] = {
TX_4X4, // TX_4X4
TX_4X4, // TX_8X8
TX_8X8, // TX_16X16
TX_16X16, // TX_32X32
#if CONFIG_TX64X64
TX_32X32, // TX_64X64
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_4X4, // TX_8X4
TX_8X8, // TX_8X16
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
static const TX_SIZE sub_tx_size_map[2][TX_SIZES_ALL] = {
{
// Intra
TX_4X4, // TX_4X4
TX_4X4, // TX_8X8
TX_8X8, // TX_16X16
TX_16X16, // TX_32X32
#if CONFIG_TX64X64
TX_32X32, // TX_64X64
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_4X4, // TX_8X4
TX_8X8, // TX_8X16
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
TX_4X8, // TX_4X16
TX_8X4, // TX_16X4
TX_8X16, // TX_8X32
TX_16X8, // TX_32X8
#if CONFIG_TX64X64
TX_16X32, // TX_16X64
TX_32X16, // TX_64X16
#endif // CONFIG_TX64X64
#else
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
TX_8X8, // TX_32X8
#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
TX_8X8, // TX_32X8
TX_16X16, // TX_16X64
TX_16X16, // TX_64X16
#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT_INTRA
},
{
// Inter
TX_4X4, // TX_4X4
TX_4X4, // TX_8X8
TX_8X8, // TX_16X16
TX_16X16, // TX_32X32
#if CONFIG_TX64X64
TX_32X32, // TX_64X64
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X8
TX_4X4, // TX_8X4
TX_8X8, // TX_8X16
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
#endif // CONFIG_TX64X64
#if CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
TX_4X8, // TX_4X16
TX_8X4, // TX_16X4
TX_8X16, // TX_8X32
TX_16X8, // TX_32X8
#if CONFIG_TX64X64
TX_16X32, // TX_16X64
TX_32X16, // TX_64X16
#endif // CONFIG_TX64X64
#else
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
TX_8X8, // TX_32X8
#if CONFIG_TX64X64
TX_16X16, // TX_16X64
TX_16X16, // TX_64X16
#endif // CONFIG_TX64X64
TX_16X16, // TX_16X64
TX_16X16, // TX_64X16
#endif // CONFIG_TX64X64
#endif // CONFIG_EXT_PARTITION_TYPES && CONFIG_RECT_TX_EXT
},
};
static const TX_SIZE txsize_horz_map[TX_SIZES_ALL] = {
......@@ -1733,7 +1736,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
#if CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X8 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X4 }, { TX_4X8, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X8 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X8 }, { TX_4X4, TX_4X4 } },
......@@ -1743,7 +1746,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X16, TX_4X8 }, { TX_4X4, TX_4X4 } },
{ { TX_4X16, TX_4X8 }, { TX_4X16, TX_4X8 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X16, TX_4X8 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X8 }, { TX_4X4, TX_4X4 } },
......@@ -1762,7 +1765,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
{ { TX_8X4, TX_8X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
......@@ -1772,7 +1775,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_16X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
{ { TX_16X4, TX_16X4 }, { TX_8X4, TX_8X4 } },
{ { TX_8X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
{ { TX_16X4, TX_4X4 }, { TX_8X4, TX_4X4 } },
#if CONFIG_TX64X64
......@@ -1791,7 +1794,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X4, TX_8X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X16, TX_8X16 }, { TX_4X8, TX_4X8 } },
{ { TX_8X16, TX_8X8 }, { TX_4X16, TX_4X8 } },
{ { TX_8X8, TX_8X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X16, TX_8X16 }, { TX_4X8, TX_4X8 } },
{ { TX_8X16, TX_8X16 }, { TX_4X8, TX_4X8 } },
......@@ -1820,7 +1823,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X8, TX_4X4 }, { TX_4X8, TX_4X4 } },
{ { TX_8X4, TX_8X4 }, { TX_8X4, TX_8X4 } },
{ { TX_8X8, TX_8X4 }, { TX_8X8, TX_8X4 } },
{ { TX_16X8, TX_8X4 }, { TX_16X8, TX_8X4 } },
{ { TX_16X8, TX_16X4 }, { TX_8X8, TX_8X4 } },
{ { TX_16X8, TX_8X4 }, { TX_16X8, TX_8X4 } },
{ { TX_16X8, TX_8X4 }, { TX_16X8, TX_8X4 } },
#if CONFIG_TX64X64
......@@ -1849,7 +1852,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X4, TX_8X4 }, { TX_8X4, TX_8X4 } },
{ { TX_8X16, TX_8X16 }, { TX_8X16, TX_8X16 } },
{ { TX_16X8, TX_16X8 }, { TX_8X8, TX_8X8 } },
{ { TX_16X32, TX_16X32 }, { TX_8X16, TX_8X16 } },
{ { TX_16X32, TX_16X16 }, { TX_8X32, TX_8X16 } },
{ { TX_16X16, TX_16X16 }, { TX_8X16, TX_8X16 } },
#if CONFIG_TX64X64
{ { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } },
......@@ -1878,7 +1881,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X16, TX_8X8 }, { TX_8X16, TX_8X8 } },
{ { TX_16X8, TX_16X8 }, { TX_16X8, TX_16X8 } },
{ { TX_16X16, TX_16X8 }, { TX_16X16, TX_16X8 } },
{ { TX_32X16, TX_16X8 }, { TX_32X16, TX_16X8 } },
{ { TX_32X16, TX_32X8 }, { TX_16X16, TX_16X8 } },
#if CONFIG_TX64X64
{ { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
{ { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
......
......@@ -285,7 +285,7 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
pd->dst.stride, max_scan_line, eob, cm->reduced_tx_set_used);
*eob_total += eob;
} else {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
assert(IMPLIES(tx_size <= TX_4X4, sub_txs == tx_size));
assert(IMPLIES(tx_size > TX_4X4, sub_txs < tx_size));
const int bsw = tx_size_wide_unit[sub_txs];
......@@ -532,7 +532,7 @@ static void decode_token_and_recon_block(AV1Decoder *const pbi,
const int is_split =
(l_max_tx_size != mbmi->inter_tx_size[0][0] && bsize == bsizec &&
txsize_to_bsize[l_max_tx_size] == bsizec);
if (is_split) max_tx_size = sub_tx_size_map[max_tx_size];
if (is_split) max_tx_size = sub_tx_size_map[1][max_tx_size];
}
#endif // DISABLE_VARTX_FOR_CHROMA == 2
const int bh_var_tx = tx_size_high_unit[max_tx_size];
......
......@@ -448,7 +448,7 @@ static void read_tx_size_vartx(AV1_COMMON *cm, MACROBLOCKD *xd,
is_split = aom_read_symbol(r, ec_ctx->txfm_partition_cdf[ctx], 2, ACCT_STR);
if (is_split) {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
const int bsw = tx_size_wide_unit[sub_txs];
const int bsh = tx_size_high_unit[sub_txs];
......@@ -495,8 +495,7 @@ static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
// TODO(debargha): Clean up the logic here. This function should only
// be called for intra.
const BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
const int32_t tx_size_cat = is_inter ? inter_tx_size_cat_lookup[bsize]
: intra_tx_size_cat_lookup[bsize];
const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize, is_inter);
const int max_depths = bsize_to_max_depth(bsize, 0);
const int ctx = get_tx_size_context(xd);
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
......@@ -957,7 +956,7 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
const TX_SIZE mtx_size =
get_max_rect_tx_size(xd->mi[0]->mbmi.sb_type, inter_block);
const TX_SIZE tx_size =
inter_block ? AOMMAX(sub_tx_size_map[mtx_size], mbmi->min_tx_size)
inter_block ? AOMMAX(sub_tx_size_map[1][mtx_size], mbmi->min_tx_size)
: mbmi->tx_size;
#endif // !CONFIG_TXK_SEL
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
......
......@@ -230,7 +230,7 @@ static void write_tx_size_vartx(const AV1_COMMON *cm, MACROBLOCKD *xd,
xd->left_txfm_context + blk_row, tx_size, tx_size);
// TODO(yuec): set correct txfm partition update for qttx
} else {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
const int bsw = tx_size_wide_unit[sub_txs];
const int bsh = tx_size_high_unit[sub_txs];
......@@ -262,9 +262,9 @@ static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
if (block_signals_txsize(bsize)) {
const TX_SIZE tx_size = mbmi->tx_size;
const int tx_size_ctx = get_tx_size_context(xd);
const int32_t tx_size_cat = intra_tx_size_cat_lookup[bsize];
const int depth = tx_size_to_depth(tx_size, bsize, 0);
const int max_depths = bsize_to_max_depth(bsize, 0);
const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize, 0);
assert(depth >= 0 && depth <= max_depths);
assert(!is_inter_block(mbmi));
......@@ -536,7 +536,7 @@ static void pack_txb_tokens(aom_writer *w, AV1_COMMON *cm, MACROBLOCK *const x,
token_stats->cost += tmp_token_stats.cost;
#endif
} else {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
const int bsw = tx_size_wide_unit[sub_txs];
const int bsh = tx_size_high_unit[sub_txs];
......@@ -592,7 +592,7 @@ static void pack_txb_tokens(aom_writer *w, const TOKENEXTRA **tp,
token_stats->cost += tmp_token_stats.cost;
#endif
} else {
const TX_SIZE sub_txs = sub_tx_size_map[tx_size];
const TX_SIZE sub_txs = sub_tx_size_map[1][tx_size];
const int bsw = tx_size_wide_unit[sub_txs];
const int bsh = tx_size_high_unit[sub_txs];
......@@ -1111,7 +1111,7 @@ void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd,
const TX_SIZE mtx_size =
get_max_rect_tx_size(xd->mi[0]->mbmi.sb_type, is_inter);
const TX_SIZE tx_size =
is_inter ? AOMMAX(sub_tx_size_map[mtx_size], mbmi->min_tx_size)
is_inter ? AOMMAX(sub_tx_size_map[1][mtx_size], mbmi->min_tx_size)
: mbmi->tx_size;
#endif // !CONFIG_TXK_SEL
FRAME_CONTEXT *ec_ctx = xd->tile_ctx;
......@@ -1963,7 +1963,7 @@ static void write_inter_txb_coeff(AV1_COMMON *const cm, MACROBLOCK *const x,
const int is_split =
(l_max_tx_size != mbmi->inter_tx_size[0][0] && bsize == bsizec &&
txsize_to_bsize[l_max_tx_size] == bsizec);
if (is_split) max_tx_size = sub_tx_size_map[max_tx_size];
if (is_split) max_tx_size = sub_tx_size_map[1][max_tx_size];
}
#endif // DISABLE_VARTX_FOR_CHROMA == 2
const int step =
......
......@@ -4424,7 +4424,7 @@ static void sum_intra_stats(FRAME_COUNTS *counts, MACROBLOCKD *xd,
allow_update_cdf) {
const TX_SIZE tx_size = mbmi->tx_size;
const int tx_size_ctx = get_tx_size_context(xd);
const int32_t tx_size_cat = intra_tx_size_cat_lookup[bsize];
const int32_t tx_size_cat = bsize_to_tx_size_cat(bsize, 0);
const int depth = tx_size_to_depth(tx_size, bsize, 0);
const int max_depths = bsize_to_max_depth(bsize, 0);
update_cdf(fc->tx_size_cdf[tx_size_cat][tx_size_ctx], depth,
......@@ -4590,7 +4590,7 @@ static void update_txfm_count(MACROBLOCK *x, MACROBLOCKD *xd,
txfm_partition_update(xd->above_txfm_context + blk_col,