Commit 7dd2ab4f authored by Thomas Daede, committed by GitHub

Merge pull request #2 from ycho/rav1e_9_yushin

Update lookup tables
parents ae06052c 87fac36d
@@ -131,9 +131,6 @@ static const uint64_t above_64x64_txform_mask[TX_SIZES] = {
// 00000000
// 00000000
static const uint64_t left_prediction_mask[BLOCK_SIZES_ALL] = {
0x0000000000000001ULL, // BLOCK_2X2,
0x0000000000000001ULL, // BLOCK_2X4,
0x0000000000000001ULL, // BLOCK_4X2,
0x0000000000000001ULL, // BLOCK_4X4,
0x0000000000000001ULL, // BLOCK_4X8,
0x0000000000000001ULL, // BLOCK_8X4,
@@ -157,9 +154,6 @@ static const uint64_t left_prediction_mask[BLOCK_SIZES_ALL] = {
// 64 bit mask to shift and set for each prediction size.
static const uint64_t above_prediction_mask[BLOCK_SIZES_ALL] = {
0x0000000000000001ULL, // BLOCK_2X2
0x0000000000000001ULL, // BLOCK_2X4
0x0000000000000001ULL, // BLOCK_4X2
0x0000000000000001ULL, // BLOCK_4X4
0x0000000000000001ULL, // BLOCK_4X8
0x0000000000000001ULL, // BLOCK_8X4
@@ -184,9 +178,6 @@ static const uint64_t above_prediction_mask[BLOCK_SIZES_ALL] = {
// each 8x8 block that would be in the top left most block of the given block
// size in the 64x64 block.
static const uint64_t size_mask[BLOCK_SIZES_ALL] = {
0x0000000000000001ULL, // BLOCK_2X2
0x0000000000000001ULL, // BLOCK_2X4
0x0000000000000001ULL, // BLOCK_4X2
0x0000000000000001ULL, // BLOCK_4X4
0x0000000000000001ULL, // BLOCK_4X8
0x0000000000000001ULL, // BLOCK_8X4
@@ -235,9 +226,6 @@ static const uint16_t above_64x64_txform_mask_uv[TX_SIZES] = {
// 16 bit left mask to shift and set for each uv prediction size.
static const uint16_t left_prediction_mask_uv[BLOCK_SIZES_ALL] = {
0x0001, // BLOCK_2X2,
0x0001, // BLOCK_2X4,
0x0001, // BLOCK_4X2,
0x0001, // BLOCK_4X4,
0x0001, // BLOCK_4X8,
0x0001, // BLOCK_8X4,
@@ -261,9 +249,6 @@ static const uint16_t left_prediction_mask_uv[BLOCK_SIZES_ALL] = {
// 16 bit above mask to shift and set for uv each prediction size.
static const uint16_t above_prediction_mask_uv[BLOCK_SIZES_ALL] = {
0x0001, // BLOCK_2X2
0x0001, // BLOCK_2X4
0x0001, // BLOCK_4X2
0x0001, // BLOCK_4X4
0x0001, // BLOCK_4X8
0x0001, // BLOCK_8X4
@@ -287,9 +272,6 @@ static const uint16_t above_prediction_mask_uv[BLOCK_SIZES_ALL] = {
// 64 bit mask to shift and set for each uv prediction size
static const uint16_t size_mask_uv[BLOCK_SIZES_ALL] = {
0x0001, // BLOCK_2X2
0x0001, // BLOCK_2X4
0x0001, // BLOCK_4X2
0x0001, // BLOCK_4X4
0x0001, // BLOCK_4X8
0x0001, // BLOCK_8X4
@@ -1954,9 +1936,6 @@ typedef enum EDGE_DIR { VERT_EDGE = 0, HORZ_EDGE = 1, NUM_EDGE_DIRS } EDGE_DIR;
static const uint32_t av1_prediction_masks[NUM_EDGE_DIRS][BLOCK_SIZES_ALL] = {
// mask for vertical edges filtering
{
2 - 1, // BLOCK_2X2
2 - 1, // BLOCK_2X4
4 - 1, // BLOCK_4X2
4 - 1, // BLOCK_4X4
4 - 1, // BLOCK_4X8
8 - 1, // BLOCK_8X4
@@ -1988,9 +1967,6 @@ static const uint32_t av1_prediction_masks[NUM_EDGE_DIRS][BLOCK_SIZES_ALL] = {
},
// mask for horizontal edges filtering
{
2 - 1, // BLOCK_2X2
4 - 1, // BLOCK_2X4
2 - 1, // BLOCK_4X2
4 - 1, // BLOCK_4X4
8 - 1, // BLOCK_4X8
4 - 1, // BLOCK_8X4
......
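The left/above prediction masks and size masks above are indexed by block size and encode one bit per 8x8 unit of a 64x64 luma area, eight bits per row. Below is a minimal sketch of how such a mask is typically shifted to a block's position and OR-ed into a superblock's accumulated filter mask; the helper name and signature are hypothetical, and the real accumulation logic lives in av1_loopfilter.c.

#include <stdint.h>

// Shift a per-block base mask to its (row, col) position in the 8x8 grid of a
// 64x64 superblock and OR it into the accumulated loop-filter mask.
// Rows are 8 bits apart, so (row_8x8, col_8x8) maps to bit row_8x8 * 8 + col_8x8.
static void accumulate_block_mask(uint64_t *sb_mask, uint64_t base_mask,
                                  int row_8x8, int col_8x8) {
  const int shift = (row_8x8 << 3) + col_8x8;
  *sb_mask |= base_mask << shift;
}

// Example (illustrative): a 16x16 block anchored four 8x8 units down and two
// to the right would be added with
//   accumulate_block_mask(&left_y, left_prediction_mask[BLOCK_16X16], 4, 2);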
@@ -860,9 +860,6 @@ static INLINE int get_ext_tx_types(TX_SIZE tx_size, BLOCK_SIZE bs, int is_inter,
static INLINE int is_rect_tx_allowed_bsize(BLOCK_SIZE bsize) {
static const char LUT[BLOCK_SIZES_ALL] = {
0, // BLOCK_2X2
0, // BLOCK_2X4
0, // BLOCK_4X2
0, // BLOCK_4X4
1, // BLOCK_4X8
1, // BLOCK_8X4
......
@@ -726,8 +726,7 @@ static const aom_cdf_prob
static const aom_cdf_prob
default_compound_type_cdf[BLOCK_SIZES_ALL][CDF_SIZE(COMPOUND_TYPES)] = {
{ AOM_CDF3(16384, 24576) }, { AOM_CDF3(16384, 24576) },
{ AOM_CDF3(16384, 24576) }, { AOM_CDF3(16384, 24576) },
{ AOM_CDF3(16384, 24576) },
{ AOM_CDF3(32640, 32704) }, { AOM_CDF3(32640, 32704) },
{ AOM_CDF3(8448, 13293) }, { AOM_CDF3(9216, 12436) },
{ AOM_CDF3(10112, 12679) }, { AOM_CDF3(9088, 10753) },
@@ -762,7 +761,6 @@ static const aom_cdf_prob
static const aom_cdf_prob
default_wedge_interintra_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
{ AOM_CDF2(194 * 128) }, { AOM_CDF2(213 * 128) }, { AOM_CDF2(217 * 128) },
{ AOM_CDF2(222 * 128) }, { AOM_CDF2(224 * 128) }, { AOM_CDF2(226 * 128) },
@@ -784,7 +782,7 @@ const aom_tree_index av1_motion_mode_tree[TREE_SIZE(MOTION_MODES)] = {
static const aom_prob
default_motion_mode_prob[BLOCK_SIZES_ALL][MOTION_MODES - 1] = {
{ 128, 128 }, { 128, 128 }, { 128, 128 }, { 128, 128 },
{ 128, 128 },
{ 128, 128 }, { 128, 128 }, { 62, 115 }, { 39, 131 },
{ 39, 132 }, { 118, 94 }, { 77, 125 }, { 100, 121 },
{ 190, 66 }, { 207, 102 }, { 197, 100 }, { 239, 76 },
@@ -799,8 +797,7 @@ static const aom_prob
};
static const aom_cdf_prob
default_motion_mode_cdf[BLOCK_SIZES_ALL][CDF_SIZE(MOTION_MODES)] = {
{ AOM_CDF3(16384, 24576) }, { AOM_CDF3(16384, 24576) },
{ AOM_CDF3(16384, 24576) }, { AOM_CDF3(16384, 24576) },
{ AOM_CDF3(16384, 24576) },
{ AOM_CDF3(16384, 24576) }, { AOM_CDF3(16384, 24576) },
{ AOM_CDF3(7936, 19091) }, { AOM_CDF3(4991, 19205) },
{ AOM_CDF3(4992, 19314) }, { AOM_CDF3(15104, 21590) },
@@ -820,7 +817,6 @@ static const aom_cdf_prob
};
static const aom_cdf_prob default_obmc_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
{ AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) }, { AOM_CDF2(128 * 128) },
{ AOM_CDF2(45 * 128) }, { AOM_CDF2(79 * 128) }, { AOM_CDF2(75 * 128) },
{ AOM_CDF2(130 * 128) }, { AOM_CDF2(141 * 128) }, { AOM_CDF2(144 * 128) },
......
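The AOM_CDF2/AOM_CDF3 entries edited above take cumulative probabilities expressed out of 32768, the entropy coder's 15-bit probability space; the macros additionally convert and pad the values for the coder, which is omitted in this illustration. A worked example for the AOM_CDF3(16384, 24576) entries shown in the hunks (a sketch, not libaom code):

#include <stdio.h>

int main(void) {
  // Cumulative values from AOM_CDF3(16384, 24576), closed at 32768.
  const int cdf[3] = { 16384, 24576, 32768 };
  int prev = 0;
  for (int s = 0; s < 3; ++s) {
    printf("P(symbol %d) = %.3f\n", s, (cdf[s] - prev) / 32768.0);
    prev = cdf[s];
  }
  return 0;  // prints 0.500, 0.250, 0.250
}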
@@ -139,9 +139,6 @@ typedef enum BITSTREAM_PROFILE {
// type, so that we can save memory when they are used in structs/arrays.
typedef enum ATTRIBUTE_PACKED {
BLOCK_2X2,
BLOCK_2X4,
BLOCK_4X2,
BLOCK_4X4,
BLOCK_4X8,
BLOCK_8X4,
......
@@ -241,9 +241,6 @@ static const wedge_code_type wedge_codebook_16_heqw[16] = {
};
const wedge_params_type wedge_params_lookup[BLOCK_SIZES_ALL] = {
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
{ 0, NULL, NULL, 0, NULL },
@@ -2235,7 +2232,6 @@ static const int ii_weights1d[MAX_SB_SIZE] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
static int ii_size_scales[BLOCK_SIZES_ALL] = {
32, 32, 32,
32, 16, 16, 16, 8, 8, 8, 4,
4, 4, 2, 2, 2, 1, 1, 1,
16, 16, 8, 8, 4, 4, 2, 2
@@ -2248,7 +2244,6 @@ static const int ii_weights1d[MAX_SB_SIZE] = {
2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
static int ii_size_scales[BLOCK_SIZES_ALL] = {
16, 16, 16,
16, 8, 8, 8, 4, 4, 4,
2, 2, 2, 1, 1, 1,
8, 8, 4, 4, 2, 2,
......
@@ -407,8 +407,6 @@ static const uint16_t orders_vert_8x8[256] = {
#if CONFIG_EXT_PARTITION
/* clang-format off */
static const uint16_t *const orders[BLOCK_SIZES_ALL] = {
// 2X2, 2X4, 4X2
NULL, NULL, NULL,
// 4X4
orders_4x4,
// 4X8, 8X4, 8X8
@@ -441,8 +439,6 @@ static const uint16_t *const orders[BLOCK_SIZES_ALL] = {
#else
/* clang-format off */
static const uint16_t *const orders[BLOCK_SIZES_ALL] = {
// 2X2, 2X4, 4X2
NULL, NULL, NULL,
// 4X4
orders_8x8,
// 4X8, 8X4, 8X8
......
@@ -159,9 +159,6 @@ static const uint8_t num_16x16_blocks_wide_lookup[BLOCK_SIZES_ALL] = {
1,
1,
1,
1,
1,
1,
2,
2,
2,
@@ -183,9 +180,6 @@ static const uint8_t num_16x16_blocks_high_lookup[BLOCK_SIZES_ALL] = {
1,
1,
1,
1,
1,
1,
2,
1,
2,
@@ -2016,7 +2010,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
/* clang-format off */
static const BLOCK_SIZE min_partition_size[BLOCK_SIZES_ALL] = {
BLOCK_2X2, BLOCK_2X2, BLOCK_2X2, // 2x2, 2x4, 4x2
BLOCK_4X4, // 4x4
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, // 4x8, 8x4, 8x8
BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, // 8x16, 16x8, 16x16
@@ -2033,7 +2026,6 @@ static const BLOCK_SIZE min_partition_size[BLOCK_SIZES_ALL] = {
};
static const BLOCK_SIZE max_partition_size[BLOCK_SIZES_ALL] = {
BLOCK_4X4, BLOCK_4X4, BLOCK_4X4, // 2x2, 2x4, 4x2
BLOCK_8X8, // 4x4
BLOCK_16X16, BLOCK_16X16, BLOCK_16X16, // 4x8, 8x4, 8x8
BLOCK_32X32, BLOCK_32X32, BLOCK_32X32, // 8x16, 16x8, 16x16
@@ -2051,7 +2043,6 @@ static const BLOCK_SIZE max_partition_size[BLOCK_SIZES_ALL] = {
// Next square block size less or equal than current block size.
static const BLOCK_SIZE next_square_size[BLOCK_SIZES_ALL] = {
BLOCK_2X2, BLOCK_2X2, BLOCK_2X2, // 2x2, 2x4, 4x2
BLOCK_4X4, // 4x4
BLOCK_4X4, BLOCK_4X4, BLOCK_8X8, // 4x8, 8x4, 8x8
BLOCK_8X8, BLOCK_8X8, BLOCK_16X16, // 8x16, 16x8, 16x16
......
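As its comment states, next_square_size[] above maps each (possibly rectangular) block size to the largest square block size that does not exceed it, which the partition logic can use as a starting point. A self-contained illustration follows, with a locally trimmed enum and table re-declared purely for the example, mirroring the rows shown in the hunk:

#include <stdio.h>

// Trimmed local copies for illustration only; the real enum and full table
// appear elsewhere in this diff.
typedef enum { BLOCK_4X4, BLOCK_4X8, BLOCK_8X4, BLOCK_8X8,
               BLOCK_8X16, BLOCK_16X8, BLOCK_16X16, N_DEMO_SIZES } DEMO_BLOCK_SIZE;

static const DEMO_BLOCK_SIZE next_square_size_demo[N_DEMO_SIZES] = {
  BLOCK_4X4,                          // 4x4
  BLOCK_4X4, BLOCK_4X4, BLOCK_8X8,    // 4x8, 8x4, 8x8
  BLOCK_8X8, BLOCK_8X8, BLOCK_16X16   // 8x16, 16x8, 16x16
};

int main(void) {
  // An 8x16 block contains at most an 8x8 square, so the lookup returns BLOCK_8X8.
  printf("%d\n", next_square_size_demo[BLOCK_8X16] == BLOCK_8X8);  // prints 1
  return 0;
}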
@@ -54,7 +54,7 @@
// This table is used to correct for block size.
// The factors here are << 2 (2 = x0.5, 32 = x8 etc).
static const uint8_t rd_thresh_block_size_factor[BLOCK_SIZES_ALL] = {
2, 2, 2, 2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32,
2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32,
#if CONFIG_EXT_PARTITION
48, 48, 64,
#endif // CONFIG_EXT_PARTITION
......
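The comment on rd_thresh_block_size_factor[] above notes that the factors are stored shifted left by 2, so a stored 2 means x0.5 and 32 means x8. A small worked example, under the assumption that a factor is applied as (threshold * factor) >> 2 (the helper below is hypothetical, not the libaom call site):

#include <stdio.h>

// Undo the << 2 encoding by shifting right after the multiply.
static unsigned scale_rd_thresh(unsigned base_thresh, unsigned factor) {
  return (base_thresh * factor) >> 2;
}

int main(void) {
  printf("%u\n", scale_rd_thresh(1000, 2));   // 500  -> x0.5
  printf("%u\n", scale_rd_thresh(1000, 32));  // 8000 -> x8
  return 0;
}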
@@ -4614,7 +4614,7 @@ static int find_tx_size_rd_records(MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row,
static const uint32_t skip_pred_threshold[3][BLOCK_SIZES_ALL] = {
{
0, 0, 0, 50, 50, 50, 55, 47, 47, 53, 53, 53, 0, 0, 0, 0,
50, 50, 50, 55, 47, 47, 53, 53, 53, 0, 0, 0, 0,
#if CONFIG_EXT_PARTITION
0, 0, 0,
#endif
@@ -4624,7 +4624,7 @@ static const uint32_t skip_pred_threshold[3][BLOCK_SIZES_ALL] = {
#endif
},
{
0, 0, 0, 69, 69, 69, 67, 68, 68, 53, 53, 53, 0, 0, 0, 0,
69, 69, 69, 67, 68, 68, 53, 53, 53, 0, 0, 0, 0,
#if CONFIG_EXT_PARTITION
0, 0, 0,
#endif
@@ -4634,7 +4634,7 @@ static const uint32_t skip_pred_threshold[3][BLOCK_SIZES_ALL] = {
#endif
},
{
0, 0, 0, 70, 73, 73, 70, 73, 73, 58, 58, 58, 0, 0, 0, 0,
70, 73, 73, 70, 73, 73, 58, 58, 58, 0, 0, 0, 0,
#if CONFIG_EXT_PARTITION
0, 0, 0,
#endif
......