Commit 2b43501b authored by Debargha Mukherjee

Implement 64x32 and 32x64 transforms

Change-Id: Ifa983d83a509cdfad78f6400df7d60c8f5b4f68c
parent 26d3e45f
@@ -257,6 +257,7 @@ void av1_fwd_txfm2d_32x32_c(const int16_t *input, int32_t *output, int stride,
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
#if CONFIG_TX64X64
void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
int tx_type, int bd) {
int32_t txfm_buf[64 * 64];
@@ -264,6 +265,21 @@ void av1_fwd_txfm2d_64x64_c(const int16_t *input, int32_t *output, int stride,
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_32x64_c(const int16_t *input, int32_t *output, int stride,
int tx_type, int bd) {
int32_t txfm_buf[32 * 64];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_32x64_cfg(tx_type);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
void av1_fwd_txfm2d_64x32_c(const int16_t *input, int32_t *output, int stride,
int tx_type, int bd) {
int32_t txfm_buf[64 * 32];
TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_64x32_cfg(tx_type);
fwd_txfm2d_c(input, output, stride, &cfg, txfm_buf, bd);
}
#endif // CONFIG_TX64X64
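
For reference, a minimal sketch (not part of the commit) of how the new rectangular forward transforms are meant to be called. The driver function and buffer names are hypothetical; the prototype and buffer size match the code added above, and only DCT_DCT is wired up for these sizes in this commit.

#include <stdint.h>
#include "av1_rtcd.h" /* generated dispatch header; exact path depends on the build */

/* Hypothetical driver: forward-transform one 32x64 residual block. */
static void fwd_32x64_sketch(const int16_t *residual, int stride, int bd) {
  int32_t coeffs[32 * 64]; /* 2048 output coefficients, matching txfm_buf above */
  av1_fwd_txfm2d_32x64_c(residual, coeffs, stride, DCT_DCT, bd);
}
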
static const TXFM_1D_CFG *fwd_txfm_col_cfg_ls[TX_TYPES_1D][TX_SIZES] = {
// DCT
{
@@ -342,6 +358,39 @@ TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size) {
return cfg;
}
#if CONFIG_TX64X64
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type) {
TXFM_2D_FLIP_CFG cfg;
const int tx_type_row = htx_tab[tx_type];
const int tx_size_row = txsize_horz_map[TX_32X64];
switch (tx_type) {
case DCT_DCT:
cfg.col_cfg = &fwd_txfm_1d_col_cfg_dct_64;
cfg.row_cfg = fwd_txfm_row_cfg_ls[tx_type_row][tx_size_row];
cfg.ud_flip = 0;
cfg.lr_flip = 0;
break;
default: assert(0);
}
return cfg;
}
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type) {
TXFM_2D_FLIP_CFG cfg;
const int tx_type_col = vtx_tab[tx_type];
const int tx_size_col = txsize_vert_map[TX_64X32];
switch (tx_type) {
case DCT_DCT:
cfg.col_cfg = fwd_txfm_col_cfg_ls[tx_type_col][tx_size_col];
cfg.row_cfg = &fwd_txfm_1d_row_cfg_dct_64;
cfg.ud_flip = 0;
cfg.lr_flip = 0;
break;
default: assert(0);
}
return cfg;
}
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
TXFM_2D_FLIP_CFG cfg;
switch (tx_type) {
@@ -358,3 +407,4 @@ TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type) {
}
return cfg;
}
#endif // CONFIG_TX64X64
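
Both rectangular cfg getters follow one pattern: the 64-point dimension takes the dedicated dct_64 1D config directly, while the 32-point dimension is looked up through the usual per-type/per-size tables (htx_tab with txsize_horz_map for rows, vtx_tab with txsize_vert_map for columns), and any tx_type other than DCT_DCT asserts. A sketch restating what the returned cfg selects for a 32-wide by 64-tall block:

/* Sketch only; assumes the headers declaring TXFM_2D_FLIP_CFG and DCT_DCT. */
static void cfg_32x64_sketch(void) {
  TXFM_2D_FLIP_CFG cfg = av1_get_fwd_txfm_32x64_cfg(DCT_DCT);
  /* cfg.col_cfg: the 64-point DCT run down each of the 32 columns. */
  /* cfg.row_cfg: the 32-point DCT run across each of the 64 rows.  */
  /* cfg.ud_flip == 0, cfg.lr_flip == 0: DCT_DCT never flips.       */
  (void)cfg;
}
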
@@ -349,12 +349,24 @@ void av1_inv_txfm2d_add_32x32_c(const int32_t *input, uint16_t *output,
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X32, bd);
}
#if CONFIG_TX64X64
void av1_inv_txfm2d_add_64x64_c(const int32_t *input, uint16_t *output,
int stride, int tx_type, int bd) {
int txfm_buf[64 * 64 + 64 + 64];
TXFM_2D_FLIP_CFG cfg = av1_get_inv_txfm_64x64_cfg(tx_type);
inv_txfm2d_add_c(input, output, stride, &cfg, txfm_buf, -4, bd);
#if CONFIG_TX64X64
assert(fwd_shift_sum[TX_64X64] == -4);
#endif
}
void av1_inv_txfm2d_add_64x32_c(const int32_t *input, uint16_t *output,
int stride, int tx_type, int bd) {
int txfm_buf[64 * 32 + 64 + 64];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_64X32, bd);
}
void av1_inv_txfm2d_add_32x64_c(const int32_t *input, uint16_t *output,
int stride, int tx_type, int bd) {
int txfm_buf[64 * 32 + 64 + 64];
inv_txfm2d_add_facade(input, output, stride, txfm_buf, tx_type, TX_32X64, bd);
}
#endif // CONFIG_TX64X64
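
A hedged round-trip sketch tying the forward and inverse halves together (quantization omitted; the function and buffer names are invented for illustration). Note the inverse side adds into a uint16_t high-bit-depth reconstruction buffer rather than writing coefficients.

static void round_trip_64x32_sketch(const int16_t *residual, int res_stride,
                                    uint16_t *recon, int rec_stride, int bd) {
  int32_t coeffs[64 * 32];
  av1_fwd_txfm2d_64x32_c(residual, coeffs, res_stride, DCT_DCT, bd);
  /* ... a real encoder would quantize / dequantize coeffs here ... */
  av1_inv_txfm2d_add_64x32_c(coeffs, recon, rec_stride, DCT_DCT, bd);
}
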
@@ -2813,6 +2813,10 @@ static const uint32_t av1_transform_masks[NUM_EDGE_DIRS][TX_SIZES_ALL] = {
16 - 1, // TX_16X8
16 - 1, // TX_16X32
32 - 1, // TX_32X16
#if CONFIG_TX64X64
32 - 1, // TX_32X64
64 - 1, // TX_64X32
#endif // CONFIG_TX64X64
4 - 1, // TX_4X16
16 - 1, // TX_16X4
8 - 1, // TX_8X32
@@ -2835,6 +2839,10 @@ static const uint32_t av1_transform_masks[NUM_EDGE_DIRS][TX_SIZES_ALL] = {
8 - 1, // TX_16X8
32 - 1, // TX_16X32
16 - 1, // TX_32X16
#if CONFIG_TX64X64
64 - 1, // TX_32X64
32 - 1, // TX_64X32
#endif // CONFIG_TX64X64
16 - 1, // TX_4X16
4 - 1, // TX_16X4
32 - 1, // TX_8X32
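
Each entry is width - 1 in the first table and height - 1 in the second, so the loop filter can test whether a sample position sits on a transform boundary with a single AND. A sketch of the intended use, assuming the first index is the loop filter's VERT_EDGE direction:

/* Hypothetical helper: x is a column offset in the filtered plane.
   A column starts a new transform block iff it is a multiple of the
   transform width, i.e. iff (x & mask) == 0. */
static int is_vert_tx_boundary_sketch(uint32_t x, int tx_size) {
  const uint32_t mask = av1_transform_masks[VERT_EDGE][tx_size];
  return (x & mask) == 0;
}
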
@@ -160,6 +160,8 @@ add_proto qw/void av1_iht32x32_1024_add/, "const tran_low_t *input, uint8_t *out
if (aom_config("CONFIG_TX64X64") eq "yes") {
add_proto qw/void av1_iht64x64_4096_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
add_proto qw/void av1_iht32x64_2048_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
add_proto qw/void av1_iht64x32_2048_add/, "const tran_low_t *input, uint8_t *output, int pitch, const struct txfm_param *param";
}
if (aom_config("CONFIG_NEW_QUANT") eq "yes") {
@@ -285,7 +287,11 @@ add_proto qw/void av1_inv_txfm2d_add_32x32/, "const int32_t *input, uint16_t *ou
if (aom_config("CONFIG_DAALA_DCT32") ne "yes") {
specialize qw/av1_inv_txfm2d_add_32x32 avx2/;
}
add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
if (aom_config("CONFIG_TX64X64") eq "yes") {
add_proto qw/void av1_inv_txfm2d_add_64x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_64x32/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
add_proto qw/void av1_inv_txfm2d_add_32x64/, "const int32_t *input, uint16_t *output, int stride, int tx_type, int bd";
}
#
# Encoder functions below this point.
@@ -354,6 +360,8 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
if (aom_config("CONFIG_TX64X64") eq "yes") {
add_proto qw/void av1_fht64x64/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
add_proto qw/void av1_fht64x32/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
add_proto qw/void av1_fht32x64/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
}
add_proto qw/void av1_fht4x8/, "const int16_t *input, tran_low_t *output, int stride, struct txfm_param *param";
@@ -396,7 +404,7 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
}
}
add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bsx, int bsy, int tx_type";
#fwd txfm
add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
@@ -421,9 +429,14 @@ if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
if (aom_config("CONFIG_DAALA_DCT32") ne "yes") {
specialize qw/av1_fwd_txfm2d_32x32 sse4_1/;
}
add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT64") ne "yes") {
specialize qw/av1_fwd_txfm2d_64x64 sse4_1/;
if (aom_config("CONFIG_TX64X64") eq "yes") {
add_proto qw/void av1_fwd_txfm2d_32x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_64x32/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_64x64/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
if (aom_config("CONFIG_DAALA_DCT64") ne "yes") {
specialize qw/av1_fwd_txfm2d_64x64 sse4_1/;
}
}
#
# Motion search
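
The new prototypes above register no SIMD specialization (only the 64x64 keeps its sse4_1 path), so the rtcd generator binds them straight to the C implementations; roughly what the generated header would contain (a sketch, since the exact output depends on the rtcd script and configured targets):

void av1_fwd_txfm2d_32x64_c(const int16_t *input, int32_t *output, int stride,
                            int tx_type, int bd);
#define av1_fwd_txfm2d_32x64 av1_fwd_txfm2d_32x64_c

The av1_fwd_idtx prototype also gains separate bsx/bsy arguments in place of the single bs, so the identity transform can cover rectangular blocks.
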
@@ -207,6 +207,8 @@ static INLINE int av1_rotate_tx_size(int tx_size) {
case TX_32X32: return TX_32X32;
#if CONFIG_TX64X64
case TX_64X64: return TX_64X64;
case TX_32X64: return TX_64X32;
case TX_64X32: return TX_32X64;
#endif
case TX_4X8: return TX_8X4;
case TX_8X4: return TX_4X8;
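
The two new cases keep rotation an involution across the rectangular pairs; a quick sanity check (sketch, requires <assert.h>):

assert(av1_rotate_tx_size(TX_32X64) == TX_64X32);
assert(av1_rotate_tx_size(av1_rotate_tx_size(TX_64X32)) == TX_64X32);
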
@@ -352,7 +354,11 @@ void av1_gen_inv_stage_range(int8_t *stage_range_col, int8_t *stage_range_row,
int bd);
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_cfg(int tx_type, int tx_size);
#if CONFIG_TX64X64
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x64_cfg(int tx_type);
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_64x32_cfg(int tx_type);
TXFM_2D_FLIP_CFG av1_get_fwd_txfm_32x64_cfg(int tx_type);
#endif // CONFIG_TX64X64
TXFM_2D_FLIP_CFG av1_get_inv_txfm_cfg(int tx_type, int tx_size);
#ifdef __cplusplus
}
@@ -867,6 +867,10 @@ static const TX_SIZE sub_tx_size_map[TX_SIZES_ALL] = {
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
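
sub_tx_size_map drives recursive transform splitting, and both new rectangles step down to the square TX_32X32; a sketch of one step:

/* Recursive tx-size splitting: TX_64X32 -> TX_32X32 -> ... */
TX_SIZE t = sub_tx_size_map[TX_64X32]; /* == TX_32X32 */
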
@@ -890,6 +894,10 @@ static const TX_SIZE txsize_horz_map[TX_SIZES_ALL] = {
TX_16X16, // TX_16X8
TX_16X16, // TX_16X32
TX_32X32, // TX_32X16
#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_64X64, // TX_64X32
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X16
TX_16X16, // TX_16X4
TX_8X8, // TX_8X32
@@ -913,6 +921,10 @@ static const TX_SIZE txsize_vert_map[TX_SIZES_ALL] = {
TX_8X8, // TX_16X8
TX_32X32, // TX_16X32
TX_16X16, // TX_32X16
#if CONFIG_TX64X64
TX_64X64, // TX_32X64
TX_32X32, // TX_64X32
#endif // CONFIG_TX64X64
TX_16X16, // TX_4X16
TX_4X4, // TX_16X4
TX_32X32, // TX_8X32
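
These two maps are exactly what the new cfg getters consume (see av1_get_fwd_txfm_32x64_cfg above): each dimension of a rectangle maps to the square size of matching length. A consistency sketch (requires <assert.h>):

assert(txsize_horz_map[TX_32X64] == TX_32X32); /* width 32 -> 32-point rows  */
assert(txsize_vert_map[TX_32X64] == TX_64X64); /* height 64 -> 64-point cols */
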
@@ -930,11 +942,15 @@ static const int tx_size_wide[TX_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2
2,
#endif
4, 8, 16, 32,
4, 8, 16, 32,
#if CONFIG_TX64X64
64,
#endif // CONFIG_TX64X64
4, 8, 8, 16, 16, 32, 4, 16, 8, 32
4, 8, 8, 16, 16, 32,
#if CONFIG_TX64X64
32, 64,
#endif // CONFIG_TX64X64
4, 16, 8, 32
};
#if CONFIG_CHROMA_2X2
@@ -948,44 +964,64 @@ static const int tx_size_high[TX_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2
2,
#endif
4, 8, 16, 32,
4, 8, 16, 32,
#if CONFIG_TX64X64
64,
#endif // CONFIG_TX64X64
8, 4, 16, 8, 32, 16, 16, 4, 32, 8
8, 4, 16, 8, 32, 16,
#if CONFIG_TX64X64
64, 32,
#endif // CONFIG_TX64X64
16, 4, 32, 8
};
// Transform block width in unit
static const int tx_size_wide_unit[TX_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2
1, 2, 4, 8, 16,
1, 2, 4, 8, 16,
#if CONFIG_TX64X64
32,
#endif // CONFIG_TX64X64
2, 4, 4, 8, 8, 16, 2, 8, 4, 16
2, 4, 4, 8, 8, 16,
#if CONFIG_TX64X64
16, 32,
#endif // CONFIG_TX64X64
2, 8, 4, 16
#else // CONFIG_CHROMA_2X2
1, 2, 4, 8,
1, 2, 4, 8,
#if CONFIG_TX64X64
16,
#endif // CONFIG_TX64X64
1, 2, 2, 4, 4, 8, 1, 4, 2, 8
1, 2, 2, 4, 4, 8,
#if CONFIG_TX64X64
8, 16,
#endif // CONFIG_TX64X64
1, 4, 2, 8
#endif // CONFIG_CHROMA_2X2
};
// Transform block height in unit
static const int tx_size_high_unit[TX_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2
1, 2, 4, 8, 16,
1, 2, 4, 8, 16,
#if CONFIG_TX64X64
32,
#endif // CONFIG_TX64X64
4, 2, 8, 4, 16, 8, 8, 2, 16, 4
4, 2, 8, 4, 16, 8,
#if CONFIG_TX64X64
32, 16,
#endif // CONFIG_TX64X64
8, 2, 16, 4
#else // CONFIG_CHROMA_2X2
1, 2, 4, 8,
#if CONFIG_TX64X64
16,
#endif // CONFIG_TX64X64
2, 1, 4, 2, 8, 4, 4, 1, 8, 2
2, 1, 4, 2, 8, 4,
#if CONFIG_TX64X64
16, 8,
#endif // CONFIG_TX64X64
4, 1, 8, 2
#endif // CONFIG_CHROMA_2X2
};
@@ -998,7 +1034,11 @@ static const int tx_size_wide_log2[TX_SIZES_ALL] = {
#if CONFIG_TX64X64
6,
#endif // CONFIG_TX64X64
2, 3, 3, 4, 4, 5, 2, 4, 3, 5
2, 3, 3, 4, 4, 5,
#if CONFIG_TX64X64
5, 6,
#endif // CONFIG_TX64X64
2, 4, 3, 5
};
// Transform block height in log2
@@ -1010,7 +1050,11 @@ static const int tx_size_high_log2[TX_SIZES_ALL] = {
#if CONFIG_TX64X64
6,
#endif // CONFIG_TX64X64
3, 2, 4, 3, 5, 4, 4, 2, 5, 3
3, 2, 4, 3, 5, 4,
#if CONFIG_TX64X64
6, 5,
#endif // CONFIG_TX64X64
4, 2, 5, 3
};
#define TX_UNIT_WIDE_LOG2 (MI_SIZE_LOG2 - tx_size_wide_log2[0])
@@ -1020,11 +1064,15 @@ static const int tx_size_2d[TX_SIZES_ALL] = {
#if CONFIG_CHROMA_2X2
4,
#endif
16, 64, 256, 1024,
16, 64, 256, 1024,
#if CONFIG_TX64X64
4096,
#endif // CONFIG_TX64X64
32, 32, 128, 128, 512, 512, 64, 64, 256, 256
32, 32, 128, 128, 512, 512,
#if CONFIG_TX64X64
2048, 2048,
#endif // CONFIG_TX64X64
64, 64, 256, 256
};
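
The geometry arrays stay mutually consistent for the new sizes (e.g. TX_32X64: 2048 == 32 * 64, with log2 values 5 and 6). A sketch of the invariants one would expect to hold for every entry:

#include <assert.h>

static void check_tx_geometry_sketch(void) {
  for (int t = 0; t < TX_SIZES_ALL; ++t) {
    assert(tx_size_2d[t] == tx_size_wide[t] * tx_size_high[t]);
    assert(tx_size_wide[t] == 1 << tx_size_wide_log2[t]);
    assert(tx_size_high[t] == 1 << tx_size_high_log2[t]);
  }
}
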
static const BLOCK_SIZE txsize_to_bsize[TX_SIZES_ALL] = {
@@ -1044,6 +1092,10 @@ static const BLOCK_SIZE txsize_to_bsize[TX_SIZES_ALL] = {
BLOCK_16X8, // TX_16X8
BLOCK_16X32, // TX_16X32
BLOCK_32X16, // TX_32X16
#if CONFIG_TX64X64
BLOCK_32X64, // TX_32X64
BLOCK_64X32, // TX_64X32
#endif // CONFIG_TX64X64
BLOCK_4X16, // TX_4X16
BLOCK_16X4, // TX_16X4
BLOCK_8X32, // TX_8X32
@@ -1067,6 +1119,10 @@ static const TX_SIZE txsize_sqr_map[TX_SIZES_ALL] = {
TX_8X8, // TX_16X8
TX_16X16, // TX_16X32
TX_16X16, // TX_32X16
#if CONFIG_TX64X64
TX_32X32, // TX_32X64
TX_32X32, // TX_64X32
#endif // CONFIG_TX64X64
TX_4X4, // TX_4X16
TX_4X4, // TX_16X4
TX_8X8, // TX_8X32
@@ -1090,6 +1146,10 @@ static const TX_SIZE txsize_sqr_up_map[TX_SIZES_ALL] = {
TX_16X16, // TX_16X8
TX_32X32, // TX_16X32
TX_32X32, // TX_32X16
#if CONFIG_TX64X64
TX_64X64, // TX_32X64
TX_64X64, // TX_64X32
#endif // CONFIG_TX64X64
TX_16X16, // TX_4X16
TX_16X16, // TX_16X4
TX_32X32, // TX_8X32
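
txsize_sqr_map rounds a rectangle down to the square of its smaller dimension and txsize_sqr_up_map up to the square of its larger one; for the new sizes (sketch, requires <assert.h>):

assert(txsize_sqr_map[TX_64X32] == TX_32X32);    /* min(64, 32) */
assert(txsize_sqr_up_map[TX_64X32] == TX_64X64); /* max(64, 32) */
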
@@ -1174,6 +1234,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
#if CONFIG_TX64X64
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
#endif // CONFIG_TX64X64
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
@@ -1198,13 +1262,17 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
#if CONFIG_TX64X64
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
#endif // CONFIG_TX64X64
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
},
{
// BLOCK_2X4
// BLOCK_4X2
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
@@ -1216,6 +1284,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
#if CONFIG_TX64X64
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
#endif // CONFIG_TX64X64
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
{ { TX_2X2, TX_2X2 }, { TX_2X2, TX_2X2 } },
@@ -1226,7 +1298,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
},
#elif CONFIG_CHROMA_SUB8X8
{
// BLOCK_2X2
// BLOCK_2x2
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1237,6 +1309,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1260,13 +1336,17 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
},
{
// BLOCK_2X4
// BLOCK_4X2
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1277,6 +1357,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1303,6 +1387,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1335,6 +1423,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_4X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1364,6 +1456,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X4, TX_4X4 }, { TX_4X4, TX_4X4 } },
@@ -1387,6 +1483,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X8, TX_8X4 }, { TX_4X8, TX_4X4 } },
{ { TX_8X8, TX_8X4 }, { TX_4X8, TX_4X4 } },
{ { TX_8X8, TX_8X4 }, { TX_4X8, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X4 }, { TX_4X8, TX_4X4 } },
{ { TX_8X4, TX_8X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X8, TX_8X4 }, { TX_4X8, TX_4X4 } },
@@ -1402,7 +1502,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } },
{ { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_8X8, TX_8X8 }, { TX_4X4, TX_4X4 } },
{ { TX_8X16, TX_8X8 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X4, TX_8X4 }, { TX_4X4, TX_4X4 } },
@@ -1410,6 +1510,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X16, TX_8X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X16, TX_8X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X16, TX_8X8 }, { TX_4X8, TX_4X8 } },
#if CONFIG_TX64X64
{ { TX_8X16, TX_8X8 }, { TX_4X4, TX_4X4 } },
{ { TX_8X16, TX_8X8 }, { TX_4X4, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X16, TX_4X8 }, { TX_4X16, TX_4X8 } },
{ { TX_8X4, TX_8X4 }, { TX_4X4, TX_4X4 } },
{ { TX_8X16, TX_8X8 }, { TX_4X16, TX_4X8 } },
@@ -1425,7 +1529,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
{ { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
#if CONFIG_TX64X64
{ { TX_8X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
{ { TX_16X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X4 }, { TX_4X8, TX_4X4 } },
{ { TX_8X4, TX_8X4 }, { TX_8X4, TX_8X4 } },
@@ -1433,6 +1537,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_16X8, TX_8X4 }, { TX_8X8, TX_8X4 } }, // used
{ { TX_16X8, TX_8X4 }, { TX_8X8, TX_8X4 } },
{ { TX_16X8, TX_8X4 }, { TX_8X8, TX_8X4 } },
#if CONFIG_TX64X64
{ { TX_16X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
{ { TX_16X8, TX_4X4 }, { TX_8X8, TX_4X4 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X4 }, { TX_4X8, TX_4X4 } },
{ { TX_16X4, TX_16X4 }, { TX_8X4, TX_8X4 } },
{ { TX_8X8, TX_8X4 }, { TX_8X8, TX_8X4 } },
@@ -1456,6 +1564,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_16X8, TX_16X8 }, { TX_8X8, TX_8X8 } },
{ { TX_16X16, TX_16X8 }, { TX_8X16, TX_8X8 } },
{ { TX_16X16, TX_16X8 }, { TX_8X16, TX_8X8 } },
#if CONFIG_TX64X64
{ { TX_16X16, TX_8X8 }, { TX_8X8, TX_8X8 } },
{ { TX_16X16, TX_8X8 }, { TX_8X8, TX_8X8 } },
#endif // CONFIG_TX64X64
{ { TX_4X16, TX_4X8 }, { TX_4X16, TX_4X8 } },
{ { TX_16X4, TX_16X4 }, { TX_8X4, TX_8X4 } },
{ { TX_8X16, TX_8X8 }, { TX_8X16, TX_8X8 } },
@@ -1471,7 +1583,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } },
{ { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } },
#if CONFIG_TX64X64
{ { TX_16X16, TX_16X16 }, { TX_8X8, TX_8X8 } },
{ { TX_16X32, TX_16X16 }, { TX_8X8, TX_8X8 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X4, TX_8X4 }, { TX_8X4, TX_8X4 } },
@@ -1479,6 +1591,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_16X8, TX_16X8 }, { TX_8X8, TX_8X8 } },
{ { TX_16X32, TX_16X16 }, { TX_8X16, TX_8X16 } }, // used
{ { TX_16X32, TX_16X16 }, { TX_8X16, TX_8X16 } },
#if CONFIG_TX64X64
{ { TX_16X32, TX_16X16 }, { TX_8X8, TX_8X8 } },
{ { TX_16X32, TX_16X16 }, { TX_8X8, TX_8X8 } },
#endif // CONFIG_TX64X64
{ { TX_4X16, TX_4X16 }, { TX_4X16, TX_4X16 } },
{ { TX_16X4, TX_16X4 }, { TX_8X4, TX_8X4 } },
{ { TX_8X32, TX_8X16 }, { TX_8X32, TX_8X16 } },
@@ -1494,7 +1610,7 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
{ { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
#if CONFIG_TX64X64
{ { TX_16X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
{ { TX_32X16, TX_8X8 }, { TX_16X16, TX_8X8 } },
#endif // CONFIG_TX64X64
{ { TX_4X8, TX_4X8 }, { TX_4X8, TX_4X8 } },
{ { TX_8X4, TX_8X4 }, { TX_8X4, TX_8X4 } },
@@ -1502,6 +1618,10 @@ static const TX_SIZE uv_txsize_lookup[BLOCK_SIZES_ALL][TX_SIZES_ALL][2][2] = {
{ { TX_16X8, TX_16X8 }, { TX_16X8, TX_16X8 } },
{ { TX_32X16, TX_16X8 }, { TX_16X16, TX_16X8 } },