From 0cf54d401a400a6753c4ccd8b54a9570cf4f4acd Mon Sep 17 00:00:00 2001 From: Sebastien Alaiwan Date: Mon, 16 Oct 2017 16:10:04 +0200 Subject: [PATCH] Remove abandoned SUPERTX experiment Change-Id: I9d3f0380865790d8adcb85f03305b193bc1949d7 --- av1/common/av1_loopfilter.c | 235 +-- av1/common/blockd.h | 21 - av1/common/common_data.h | 27 - av1/common/entropymode.c | 31 - av1/common/entropymode.h | 7 - av1/common/enums.h | 5 - av1/common/onyxc_int.h | 14 - av1/common/reconinter.c | 408 +--- av1/common/reconinter.h | 35 +- av1/decoder/decodeframe.c | 1109 +---------- av1/decoder/decodemv.c | 229 +-- av1/decoder/decodemv.h | 6 - av1/encoder/bitstream.c | 249 +-- av1/encoder/bitstream.h | 3 - av1/encoder/context_tree.c | 25 - av1/encoder/context_tree.h | 11 - av1/encoder/encodeframe.c | 2590 ++----------------------- av1/encoder/encodemb.c | 28 - av1/encoder/encodemb.h | 3 - av1/encoder/firstpass.c | 3 - av1/encoder/rdopt.c | 137 +- av1/encoder/rdopt.h | 23 +- av1/encoder/tokenize.c | 49 - av1/encoder/tokenize.h | 5 - build/cmake/aom_config_defaults.cmake | 1 - build/cmake/aom_experiment_deps.cmake | 7 - configure | 7 - tools/aom_entropy_optimizer.c | 13 - 28 files changed, 375 insertions(+), 4906 deletions(-) diff --git a/av1/common/av1_loopfilter.c b/av1/common/av1_loopfilter.c index 87897186b..e00101b77 100644 --- a/av1/common/av1_loopfilter.c +++ b/av1/common/av1_loopfilter.c @@ -640,15 +640,7 @@ static uint8_t get_filter_level(const AV1_COMMON *cm, return cm->mi[mi_row * cm->mi_stride + mi_col].mbmi.filt_lvl; #endif -#if CONFIG_SUPERTX - const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx); - assert( - IMPLIES(supertx_enabled(mbmi), mbmi->segment_id_supertx != MAX_SEGMENTS)); - assert(IMPLIES(supertx_enabled(mbmi), - mbmi->segment_id_supertx <= mbmi->segment_id)); -#else const int segment_id = mbmi->segment_id; -#endif // CONFIG_SUPERTX if (cm->delta_lf_present_flag) { #if CONFIG_LOOPFILTER_LEVEL int delta_lf; @@ -706,15 +698,7 @@ static uint8_t get_filter_level(const loop_filter_info_n *lfi_n, return mbmi->filt_lvl; #endif -#if CONFIG_SUPERTX - const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx); - assert( - IMPLIES(supertx_enabled(mbmi), mbmi->segment_id_supertx != MAX_SEGMENTS)); - assert(IMPLIES(supertx_enabled(mbmi), - mbmi->segment_id_supertx <= mbmi->segment_id)); -#else const int segment_id = mbmi->segment_id; -#endif // CONFIG_SUPERTX return lfi_n->lvl[segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]]; } #endif @@ -1619,20 +1603,12 @@ static void build_masks(AV1_COMMON *const cm, static void build_y_mask(AV1_COMMON *const cm, const loop_filter_info_n *const lfi_n, const MODE_INFO *mi, const int shift_y, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif // CONFIG_SUPERTX LOOP_FILTER_MASK *lfm) { const MB_MODE_INFO *mbmi = &mi->mbmi; const TX_SIZE tx_size_y = txsize_sqr_map[mbmi->tx_size]; const TX_SIZE tx_size_y_left = txsize_horz_map[mbmi->tx_size]; const TX_SIZE tx_size_y_above = txsize_vert_map[mbmi->tx_size]; -#if CONFIG_SUPERTX - const BLOCK_SIZE block_size = - supertx_enabled ? (BLOCK_SIZE)(3 * tx_size_y) : mbmi->sb_type; -#else const BLOCK_SIZE block_size = mbmi->sb_type; -#endif #if CONFIG_EXT_DELTA_Q #if CONFIG_LOOPFILTER_LEVEL const int filter_level = get_filter_level(cm, lfi_n, 0, 0, mbmi); @@ -1751,151 +1727,100 @@ void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col, // through the recursive loop structure multiple times.
switch (mip[0]->mbmi.sb_type) { case BLOCK_64X64: build_masks(cm, lfi_n, mip[0], 0, 0, lfm); break; - case BLOCK_64X32: build_masks(cm, lfi_n, mip[0], 0, 0, lfm); -#if CONFIG_SUPERTX && CONFIG_TX64X64 - if (supertx_enabled(&mip[0]->mbmi)) break; -#endif // CONFIG_SUPERTX && CONFIG_TX64X64 + case BLOCK_64X32: + build_masks(cm, lfi_n, mip[0], 0, 0, lfm); mip2 = mip + mode_info_stride * 4; if (4 >= max_rows) break; build_masks(cm, lfi_n, mip2[0], 32, 8, lfm); break; - case BLOCK_32X64: build_masks(cm, lfi_n, mip[0], 0, 0, lfm); -#if CONFIG_SUPERTX && CONFIG_TX64X64 - if (supertx_enabled(&mip[0]->mbmi)) break; -#endif // CONFIG_SUPERTX && CONFIG_TX64X64 + case BLOCK_32X64: + build_masks(cm, lfi_n, mip[0], 0, 0, lfm); mip2 = mip + 4; if (4 >= max_cols) break; build_masks(cm, lfi_n, mip2[0], 4, 2, lfm); break; default: -#if CONFIG_SUPERTX && CONFIG_TX64X64 - if (mip[0]->mbmi.tx_size == TX_64X64) { - build_masks(cm, lfi_n, mip[0], 0, 0, lfm); - } else { -#endif // CONFIG_SUPERTX && CONFIG_TX64X64 - for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) { - const int shift_y_32 = shift_32_y[idx_32]; - const int shift_uv_32 = shift_32_uv[idx_32]; - const int mi_32_col_offset = ((idx_32 & 1) << 2); - const int mi_32_row_offset = ((idx_32 >> 1) << 2); - if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows) - continue; - switch (mip[0]->mbmi.sb_type) { - case BLOCK_32X32: - build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); - break; - case BLOCK_32X16: - build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); -#if CONFIG_SUPERTX - if (supertx_enabled(&mip[0]->mbmi)) break; -#endif - if (mi_32_row_offset + 2 >= max_rows) continue; - mip2 = mip + mode_info_stride * 2; - build_masks(cm, lfi_n, mip2[0], shift_y_32 + 16, shift_uv_32 + 4, - lfm); - break; - case BLOCK_16X32: - build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); -#if CONFIG_SUPERTX - if (supertx_enabled(&mip[0]->mbmi)) break; -#endif - if (mi_32_col_offset + 2 >= max_cols) continue; - mip2 = mip + 2; - build_masks(cm, lfi_n, mip2[0], shift_y_32 + 2, shift_uv_32 + 1, - lfm); - break; - default: -#if CONFIG_SUPERTX - if (mip[0]->mbmi.tx_size == TX_32X32) { - build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); - break; - } -#endif - for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) { - const int shift_y_32_16 = shift_y_32 + shift_16_y[idx_16]; - const int shift_uv_32_16 = shift_uv_32 + shift_16_uv[idx_16]; - const int mi_16_col_offset = - mi_32_col_offset + ((idx_16 & 1) << 1); - const int mi_16_row_offset = - mi_32_row_offset + ((idx_16 >> 1) << 1); - - if (mi_16_col_offset >= max_cols || - mi_16_row_offset >= max_rows) - continue; - - switch (mip[0]->mbmi.sb_type) { - case BLOCK_16X16: - build_masks(cm, lfi_n, mip[0], shift_y_32_16, - shift_uv_32_16, lfm); - break; - case BLOCK_16X8: -#if CONFIG_SUPERTX - if (supertx_enabled(&mip[0]->mbmi)) break; -#endif - build_masks(cm, lfi_n, mip[0], shift_y_32_16, - shift_uv_32_16, lfm); - if (mi_16_row_offset + 1 >= max_rows) continue; - mip2 = mip + mode_info_stride; - build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 8, -#if CONFIG_SUPERTX - 0, -#endif - lfm); - break; - case BLOCK_8X16: -#if CONFIG_SUPERTX - if (supertx_enabled(&mip[0]->mbmi)) break; -#endif - build_masks(cm, lfi_n, mip[0], shift_y_32_16, - shift_uv_32_16, lfm); - if (mi_16_col_offset + 1 >= max_cols) continue; - mip2 = mip + 1; - build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 1, -#if CONFIG_SUPERTX - 0, -#endif - lfm); - break; - default: { - const int 
shift_y_32_16_8_zero = - shift_y_32_16 + shift_8_y[0]; -#if CONFIG_SUPERTX - if (mip[0]->mbmi.tx_size == TX_16X16) { - build_masks(cm, lfi_n, mip[0], shift_y_32_16_8_zero, - shift_uv_32_16, lfm); - break; - } -#endif - build_masks(cm, lfi_n, mip[0], shift_y_32_16_8_zero, - shift_uv_32_16, lfm); - mip += offset[0]; - for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) { - const int shift_y_32_16_8 = - shift_y_32_16 + shift_8_y[idx_8]; - const int mi_8_col_offset = - mi_16_col_offset + ((idx_8 & 1)); - const int mi_8_row_offset = - mi_16_row_offset + ((idx_8 >> 1)); - - if (mi_8_col_offset >= max_cols || - mi_8_row_offset >= max_rows) - continue; - build_y_mask(cm, lfi_n, mip[0], shift_y_32_16_8, -#if CONFIG_SUPERTX - supertx_enabled(&mip[0]->mbmi), -#endif - lfm); - } - break; + for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) { + const int shift_y_32 = shift_32_y[idx_32]; + const int shift_uv_32 = shift_32_uv[idx_32]; + const int mi_32_col_offset = ((idx_32 & 1) << 2); + const int mi_32_row_offset = ((idx_32 >> 1) << 2); + if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows) + continue; + switch (mip[0]->mbmi.sb_type) { + case BLOCK_32X32: + build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); + break; + case BLOCK_32X16: + build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); + if (mi_32_row_offset + 2 >= max_rows) continue; + mip2 = mip + mode_info_stride * 2; + build_masks(cm, lfi_n, mip2[0], shift_y_32 + 16, shift_uv_32 + 4, + lfm); + break; + case BLOCK_16X32: + build_masks(cm, lfi_n, mip[0], shift_y_32, shift_uv_32, lfm); + if (mi_32_col_offset + 2 >= max_cols) continue; + mip2 = mip + 2; + build_masks(cm, lfi_n, mip2[0], shift_y_32 + 2, shift_uv_32 + 1, + lfm); + break; + default: + for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) { + const int shift_y_32_16 = shift_y_32 + shift_16_y[idx_16]; + const int shift_uv_32_16 = shift_uv_32 + shift_16_uv[idx_16]; + const int mi_16_col_offset = + mi_32_col_offset + ((idx_16 & 1) << 1); + const int mi_16_row_offset = + mi_32_row_offset + ((idx_16 >> 1) << 1); + + if (mi_16_col_offset >= max_cols || mi_16_row_offset >= max_rows) + continue; + + switch (mip[0]->mbmi.sb_type) { + case BLOCK_16X16: + build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16, + lfm); + break; + case BLOCK_16X8: + build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16, + lfm); + if (mi_16_row_offset + 1 >= max_rows) continue; + mip2 = mip + mode_info_stride; + build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 8, lfm); + break; + case BLOCK_8X16: + build_masks(cm, lfi_n, mip[0], shift_y_32_16, shift_uv_32_16, + lfm); + if (mi_16_col_offset + 1 >= max_cols) continue; + mip2 = mip + 1; + build_y_mask(cm, lfi_n, mip2[0], shift_y_32_16 + 1, lfm); + break; + default: { + const int shift_y_32_16_8_zero = shift_y_32_16 + shift_8_y[0]; + build_masks(cm, lfi_n, mip[0], shift_y_32_16_8_zero, + shift_uv_32_16, lfm); + mip += offset[0]; + for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) { + const int shift_y_32_16_8 = + shift_y_32_16 + shift_8_y[idx_8]; + const int mi_8_col_offset = + mi_16_col_offset + ((idx_8 & 1)); + const int mi_8_row_offset = + mi_16_row_offset + ((idx_8 >> 1)); + + if (mi_8_col_offset >= max_cols || + mi_8_row_offset >= max_rows) + continue; + build_y_mask(cm, lfi_n, mip[0], shift_y_32_16_8, lfm); } + break; } } - break; - } + } + break; } -#if CONFIG_SUPERTX && CONFIG_TX64X64 } -#endif // CONFIG_SUPERTX && CONFIG_TX64X64 break; } // The largest loopfilter we have is 
16x16 so we use the 16x16 mask diff --git a/av1/common/blockd.h b/av1/common/blockd.h index 6d24b4f11..d8309ce5b 100644 --- a/av1/common/blockd.h +++ b/av1/common/blockd.h @@ -360,10 +360,6 @@ typedef struct MB_MODE_INFO { #endif int8_t skip; int8_t segment_id; -#if CONFIG_SUPERTX - // Minimum of all segment IDs under the current supertx block. - int8_t segment_id_supertx; -#endif // CONFIG_SUPERTX int8_t seg_id_predicted; // valid only when temporal_update is enabled #if CONFIG_MRC_TX @@ -759,9 +755,6 @@ typedef struct macroblockd { TXFM_CONTEXT left_txfm_context_buffer[2 * MAX_MIB_SIZE]; TX_SIZE max_tx_size; -#if CONFIG_SUPERTX - TX_SIZE supertx_size; -#endif #endif #if CONFIG_LOOP_RESTORATION @@ -877,14 +870,6 @@ static const TX_TYPE intra_mode_to_tx_type_context[INTRA_MODES] = { ADST_ADST, // PAETH }; -#if CONFIG_SUPERTX -static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) { - TX_SIZE max_tx_size = txsize_sqr_map[mbmi->tx_size]; - return tx_size_wide[max_tx_size] > - AOMMIN(block_size_wide[mbmi->sb_type], block_size_high[mbmi->sb_type]); -} -#endif // CONFIG_SUPERTX - #define USE_TXTYPE_SEARCH_FOR_SUB8X8_IN_CB4X4 1 #if CONFIG_RECT_TX @@ -1387,12 +1372,6 @@ static INLINE TX_SIZE av1_get_uv_tx_size(const MB_MODE_INFO *mbmi, assert(mbmi->tx_size > TX_2X2); #endif // CONFIG_CHROMA_2X2 -#if CONFIG_SUPERTX - if (supertx_enabled(mbmi)) - return uvsupertx_size_lookup[txsize_sqr_map[mbmi->tx_size]] - [pd->subsampling_x][pd->subsampling_y]; -#endif // CONFIG_SUPERTX - const TX_SIZE uv_txsize = uv_txsize_lookup[mbmi->sb_type][mbmi->tx_size][pd->subsampling_x] [pd->subsampling_y]; diff --git a/av1/common/common_data.h b/av1/common/common_data.h index 9bb04f72c..8f0fbefcc 100644 --- a/av1/common/common_data.h +++ b/av1/common/common_data.h @@ -2119,33 +2119,6 @@ static const int intra_mode_context[INTRA_MODES] = { }; #endif -#if CONFIG_SUPERTX -static const TX_SIZE uvsupertx_size_lookup[TX_SIZES][2][2] = { -// ss_x == 0 ss_x == 0 ss_x == 1 ss_x == 1 -// ss_y == 0 ss_y == 1 ss_y == 0 ss_y == 1 -#if CONFIG_CHROMA_2X2 - { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, -#endif - { { TX_4X4, TX_4X4 }, { TX_4X4, TX_4X4 } }, - { { TX_8X8, TX_4X4 }, { TX_4X4, TX_4X4 } }, - { { TX_16X16, TX_8X8 }, { TX_8X8, TX_8X8 } }, - { { TX_32X32, TX_16X16 }, { TX_16X16, TX_16X16 } }, -#if CONFIG_TX64X64 - { { TX_64X64, TX_32X32 }, { TX_32X32, TX_32X32 } }, -#endif // CONFIG_TX64X64 -}; - -#if CONFIG_EXT_PARTITION_TYPES -static const int partition_supertx_context_lookup[EXT_PARTITION_TYPES] = { - -1, 0, 0, 1, 0, 0, 0, 0, 0, 0 -}; - -#else -static const int partition_supertx_context_lookup[PARTITION_TYPES] = { -1, 0, 0, - 1 }; -#endif // CONFIG_EXT_PARTITION_TYPES -#endif // CONFIG_SUPERTX - #if CONFIG_NCOBMC_ADAPT_WEIGHT // NCOBMC_ADAPT_INTRPL only supports block size >= BLOCK_8X8 and <= BLOCK_64X64 static const ADAPT_OVERLAP_BLOCK adapt_overlap_block_lookup[BLOCK_SIZES_ALL] = { diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c index 0cd7096f0..90552e1d9 100644 --- a/av1/common/entropymode.c +++ b/av1/common/entropymode.c @@ -2654,25 +2654,6 @@ static const aom_cdf_prob }; #endif // CONFIG_FILTER_INTRA -#if CONFIG_SUPERTX -static const aom_prob - default_supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES] = { -#if CONFIG_CHROMA_2X2 -#if CONFIG_TX64X64 - { 1, 1, 160, 160, 170, 180 }, { 1, 1, 200, 200, 210, 220 }, -#else - { 1, 1, 160, 160, 170 }, { 1, 1, 200, 200, 210 }, -#endif // CONFIG_TX64X64 -#else -#if CONFIG_TX64X64 - { 1, 160, 160, 170, 180 }, { 1, 200, 200, 210, 220 }, -#else - { 1, 160, 160, 
170 }, { 1, 200, 200, 210 }, -#endif // CONFIG_TX64X64 -#endif // CONFIG_CHROMA_2X2 - }; -#endif // CONFIG_SUPERTX - // FIXME(someone) need real defaults here static const aom_prob default_segment_tree_probs[SEG_TREE_PROBS] = { 128, 128, 128, 128, 128, 128, 128 @@ -6565,9 +6546,6 @@ static void init_mode_probs(FRAME_CONTEXT *fc) { av1_copy(fc->interintra_mode_prob, default_interintra_mode_prob); av1_copy(fc->interintra_mode_cdf, default_interintra_mode_cdf); #endif // CONFIG_INTERINTRA -#if CONFIG_SUPERTX - av1_copy(fc->supertx_prob, default_supertx_prob); -#endif // CONFIG_SUPERTX av1_copy(fc->seg.tree_probs, default_segment_tree_probs); av1_copy(fc->seg.pred_probs, default_segment_pred_probs); #if CONFIG_NEW_MULTISYMBOL @@ -6704,15 +6682,6 @@ void av1_adapt_inter_frame_probs(AV1_COMMON *cm) { #endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION -#if CONFIG_SUPERTX - for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) { - for (j = TX_8X8; j < TX_SIZES; ++j) { - fc->supertx_prob[i][j] = av1_mode_mv_merge_probs( - pre_fc->supertx_prob[i][j], counts->supertx[i][j]); - } - } -#endif // CONFIG_SUPERTX - for (i = 0; i < INTER_MODE_CONTEXTS; i++) aom_tree_merge_probs( av1_inter_compound_mode_tree, pre_fc->inter_compound_mode_probs[i], diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h index ae23384bf..a98a9bbec 100644 --- a/av1/common/entropymode.h +++ b/av1/common/entropymode.h @@ -326,9 +326,6 @@ typedef struct frame_contexts { aom_cdf_prob intrabc_cdf[CDF_SIZE(2)]; #endif int initialized; -#if CONFIG_SUPERTX - aom_prob supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES]; -#endif // CONFIG_SUPERTX struct segmentation_probs seg; #if CONFIG_FILTER_INTRA aom_prob filter_intra_probs[PLANE_TYPES]; @@ -541,10 +538,6 @@ typedef struct FRAME_COUNTS { unsigned int inter_ext_tx[EXT_TX_SIZES][TX_TYPES]; #endif // CONFIG_EXT_TX #endif // CONFIG_ENTROPY_STATS -#if CONFIG_SUPERTX - unsigned int supertx[PARTITION_SUPERTX_CONTEXTS][TX_SIZES][2]; - unsigned int supertx_size[TX_SIZES]; -#endif // CONFIG_SUPERTX struct seg_counts seg; #if CONFIG_FILTER_INTRA unsigned int filter_intra[PLANE_TYPES][2]; diff --git a/av1/common/enums.h b/av1/common/enums.h index b04b21e04..bacd772cd 100644 --- a/av1/common/enums.h +++ b/av1/common/enums.h @@ -712,11 +712,6 @@ typedef enum { #define MODE_CTX_REF_FRAMES (TOTAL_REFS_PER_FRAME + COMP_REFS) -#if CONFIG_SUPERTX -#define PARTITION_SUPERTX_CONTEXTS 2 -#define MAX_SUPERTX_BLOCK_SIZE BLOCK_32X32 -#endif // CONFIG_SUPERTX - #if CONFIG_LOOP_RESTORATION typedef enum { RESTORE_NONE, diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h index 98b66e3c2..2d2890c50 100644 --- a/av1/common/onyxc_int.h +++ b/av1/common/onyxc_int.h @@ -914,20 +914,6 @@ static INLINE int is_chroma_reference(int mi_row, int mi_col, BLOCK_SIZE bsize, #endif } -#if CONFIG_SUPERTX -static INLINE int need_handle_chroma_sub8x8(BLOCK_SIZE bsize, int subsampling_x, - int subsampling_y) { - const int bw = mi_size_wide[bsize]; - const int bh = mi_size_high[bsize]; - - if (bsize >= BLOCK_8X8 || - ((!(bh & 0x01) || !subsampling_y) && (!(bw & 0x01) || !subsampling_x))) - return 0; - else - return 1; -} -#endif - static INLINE BLOCK_SIZE scale_chroma_bsize(BLOCK_SIZE bsize, int subsampling_x, int subsampling_y) { BLOCK_SIZE bs = bsize; diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c index e640ca71b..6177c75f9 100644 --- a/av1/common/reconinter.c +++ b/av1/common/reconinter.c @@ -834,64 +834,6 @@ void av1_init_wedge_masks() { 
init_wedge_masks(); } -#if CONFIG_SUPERTX -static void build_masked_compound_wedge_extend( - uint8_t *dst, int dst_stride, const uint8_t *src0, int src0_stride, - const uint8_t *src1, int src1_stride, - const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type, - int wedge_offset_x, int wedge_offset_y, int h, int w) { - const int subh = (2 << b_height_log2_lookup[sb_type]) == h; - const int subw = (2 << b_width_log2_lookup[sb_type]) == w; - const uint8_t *mask; - size_t mask_stride; - switch (comp_data->interinter_compound_type) { - case COMPOUND_WEDGE: - mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign, - sb_type, wedge_offset_x, wedge_offset_y); - mask_stride = MASK_MASTER_STRIDE; - break; -#if CONFIG_COMPOUND_SEGMENT - case COMPOUND_SEG: - mask = comp_data->seg_mask; - mask_stride = block_size_wide[sb_type]; - break; -#endif - default: assert(0); return; - } - aom_blend_a64_mask(dst, dst_stride, src0, src0_stride, src1, src1_stride, - mask, (int)mask_stride, h, w, subh, subw); -} - -#if CONFIG_HIGHBITDEPTH -static void build_masked_compound_wedge_extend_highbd( - uint8_t *dst_8, int dst_stride, const uint8_t *src0_8, int src0_stride, - const uint8_t *src1_8, int src1_stride, - const INTERINTER_COMPOUND_DATA *const comp_data, BLOCK_SIZE sb_type, - int wedge_offset_x, int wedge_offset_y, int h, int w, int bd) { - const int subh = (2 << b_height_log2_lookup[sb_type]) == h; - const int subw = (2 << b_width_log2_lookup[sb_type]) == w; - const uint8_t *mask; - size_t mask_stride; - switch (comp_data->interinter_compound_type) { - case COMPOUND_WEDGE: - mask = av1_get_soft_mask(comp_data->wedge_index, comp_data->wedge_sign, - sb_type, wedge_offset_x, wedge_offset_y); - mask_stride = MASK_MASTER_STRIDE; - break; -#if CONFIG_COMPOUND_SEGMENT - case COMPOUND_SEG: - mask = comp_data->seg_mask; - mask_stride = block_size_wide[sb_type]; - break; -#endif - default: assert(0); return; - } - aom_highbd_blend_a64_mask(dst_8, dst_stride, src0_8, src0_stride, src1_8, - src1_stride, mask, (int)mask_stride, h, w, subh, - subw, bd); -} -#endif // CONFIG_HIGHBITDEPTH -#else #if CONFIG_CONVOLVE_ROUND static void build_masked_compound_no_round( CONV_BUF_TYPE *dst, int dst_stride, const CONV_BUF_TYPE *src0, @@ -939,16 +881,12 @@ static void build_masked_compound_highbd( subh, subw, bd); } #endif // CONFIG_HIGHBITDEPTH -#endif // CONFIG_SUPERTX void av1_make_masked_inter_predictor( const uint8_t *pre, int pre_stride, uint8_t *dst, int dst_stride, const int subpel_x, const int subpel_y, const struct scale_factors *sf, int w, int h, ConvolveParams *conv_params, InterpFilters interp_filters, int xs, int ys, -#if CONFIG_SUPERTX - int wedge_offset_x, int wedge_offset_y, -#endif // CONFIG_SUPERTX #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION || CONFIG_COMPOUND_SEGMENT int plane, #endif @@ -1050,18 +988,6 @@ void av1_make_masked_inter_predictor( } #endif // CONFIG_COMPOUND_SEGMENT -#if CONFIG_SUPERTX -#if CONFIG_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) - build_masked_compound_wedge_extend_highbd( - dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, &comp_data, - mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd); - else -#endif // CONFIG_HIGHBITDEPTH - build_masked_compound_wedge_extend( - dst, dst_stride, dst, dst_stride, tmp_dst, MAX_SB_SIZE, &comp_data, - mi->mbmi.sb_type, wedge_offset_x, wedge_offset_y, h, w); -#else #if CONFIG_CONVOLVE_ROUND if (is_conv_no_round) { build_masked_compound_no_round(org_dst, org_dst_stride, org_dst, @@ -1095,7 
+1021,6 @@ void av1_make_masked_inter_predictor( #if CONFIG_CONVOLVE_ROUND } #endif // CONFIG_CONVOLVE_ROUND -#endif // CONFIG_SUPERTX } // TODO(sarahparker) av1_highbd_build_inter_predictor and @@ -1182,11 +1107,7 @@ static INLINE void build_inter_predictors( #if CONFIG_MOTION_VAR const MODE_INFO *mi, int build_for_obmc, #endif // CONFIG_MOTION_VAR - int block, int bw, int bh, int x, int y, int w, int h, -#if CONFIG_SUPERTX - int wedge_offset_x, int wedge_offset_y, -#endif // CONFIG_SUPERTX - int mi_x, int mi_y) { + int block, int bw, int bh, int x, int y, int w, int h, int mi_x, int mi_y) { struct macroblockd_plane *const pd = &xd->plane[plane]; #if !CONFIG_MOTION_VAR const MODE_INFO *mi = xd->mi[0]; @@ -1365,19 +1286,11 @@ static INLINE void build_inter_predictors( if (is_masked_compound_type(mi->mbmi.interinter_compound_type)) { // masked compound type has its own average mechanism conv_params.do_average = 0; -#if CONFIG_CONVOLVE_ROUND && CONFIG_COMPOUND_SEGMENT && CONFIG_SUPERTX - // TODO(angiebird): convolve_round does not support compound_segment - // when supertx is on - conv_params = get_conv_params(ref, 0, plane); -#endif } if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_type)) av1_make_masked_inter_predictor( pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y, sf, b4_w, b4_h, &conv_params, mi->mbmi.interp_filters, xs, ys, -#if CONFIG_SUPERTX - wedge_offset_x, wedge_offset_y, -#endif // CONFIG_SUPERTX #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION || CONFIG_COMPOUND_SEGMENT plane, #endif @@ -1559,11 +1472,6 @@ static INLINE void build_inter_predictors( if (is_masked_compound_type(mi->mbmi.interinter_compound_type)) { // masked compound type has its own average mechanism conv_params.do_average = 0; -#if CONFIG_CONVOLVE_ROUND && CONFIG_COMPOUND_SEGMENT && CONFIG_SUPERTX - // TODO(angiebird): convolve_round does not support compound_segment - // when supertx is on - conv_params = get_conv_params(ref, 0, plane); -#endif } if (ref && is_masked_compound_type(mi->mbmi.interinter_compound_type)) @@ -1572,9 +1480,6 @@ static INLINE void build_inter_predictors( subpel_params[ref].subpel_x, subpel_params[ref].subpel_y, sf, w, h, &conv_params, mi->mbmi.interp_filters, subpel_params[ref].xs, subpel_params[ref].ys, -#if CONFIG_SUPERTX - wedge_offset_x, wedge_offset_y, -#endif // CONFIG_SUPERTX #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION || CONFIG_COMPOUND_SEGMENT plane, #endif @@ -1665,21 +1570,14 @@ static void build_inter_predictors_for_planes(const AV1_COMMON *cm, #if CONFIG_MOTION_VAR xd->mi[0], 0, #endif // CONFIG_MOTION_VAR - y * 2 + x, bw, bh, 4 * x, 4 * y, pw, ph, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + y * 2 + x, bw, bh, 4 * x, 4 * y, pw, ph, mi_x, + mi_y); } else { build_inter_predictors(cm, xd, plane, #if CONFIG_MOTION_VAR xd->mi[0], 0, #endif // CONFIG_MOTION_VAR - 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + 0, bw, bh, 0, 0, bw, bh, mi_x, mi_y); } } } @@ -1770,193 +1668,6 @@ void av1_setup_pre_planes(MACROBLOCKD *xd, int idx, } } -#if CONFIG_SUPERTX -#if CONFIG_CB4X4 -static const uint8_t mask_4[4] = { 64, 52, 12, 0 }; -static const uint8_t mask_4_uv[4] = { 64, 52, 12, 0 }; -#endif // CONFIG_CB4X4 -static const uint8_t mask_8[8] = { 64, 64, 62, 52, 12, 2, 0, 0 }; - -static const uint8_t mask_16[16] = { 63, 62, 60, 58, 55, 50, 43, 36, - 28, 21, 14, 9, 6, 4, 2, 1 }; - -static const uint8_t mask_32[32] = { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 63, - 61, 57, 52, 45, 
36, 28, 19, 12, 7, 3, 1, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - -static const uint8_t mask_8_uv[8] = { 64, 64, 62, 52, 12, 2, 0, 0 }; - -static const uint8_t mask_16_uv[16] = { 64, 64, 64, 64, 61, 53, 45, 36, - 28, 19, 11, 3, 0, 0, 0, 0 }; - -static const uint8_t mask_32_uv[32] = { 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 60, 54, 46, 36, - 28, 18, 10, 4, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 }; - -static const uint8_t *get_supertx_mask(int length, int plane) { - switch (length) { -#if CONFIG_CB4X4 - case 4: return plane ? mask_4_uv : mask_4; -#endif // CONFIG_CB4X4 - case 8: return plane ? mask_8_uv : mask_8; - case 16: return plane ? mask_16_uv : mask_16; - case 32: return plane ? mask_32_uv : mask_32; - default: assert(0); - } - return NULL; -} - -void av1_build_masked_inter_predictor_complex( - MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre, - int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori, - BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition, - int plane) { - const struct macroblockd_plane *pd = &xd->plane[plane]; - const int ssx = pd->subsampling_x; - const int ssy = pd->subsampling_y; - const int top_w = block_size_wide[top_bsize] >> ssx; - const int top_h = block_size_high[top_bsize] >> ssy; - const int w = block_size_wide[bsize] >> ssx; - const int h = block_size_high[bsize] >> ssy; - const int w_offset = ((mi_col - mi_col_ori) * MI_SIZE) >> ssx; - const int h_offset = ((mi_row - mi_row_ori) * MI_SIZE) >> ssy; - - int w_remain, h_remain; - -#if CONFIG_HIGHBITDEPTH - const int is_hdb = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0; -#endif // CONFIG_HIGHBITDEPTH - - assert(bsize <= BLOCK_32X32); - assert(IMPLIES(plane == 0, ssx == 0)); - assert(IMPLIES(plane == 0, ssy == 0)); - - switch (partition) { - case PARTITION_HORZ: { - const uint8_t *const mask = get_supertx_mask(h, ssy); - - w_remain = top_w; - h_remain = top_h - h_offset - h; - dst += h_offset * dst_stride; - pre += h_offset * pre_stride; - -#if CONFIG_HIGHBITDEPTH - if (is_hdb) - aom_highbd_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, - pre_stride, mask, h, top_w, xd->bd); - else -#endif // CONFIG_HIGHBITDEPTH - aom_blend_a64_vmask(dst, dst_stride, dst, dst_stride, pre, pre_stride, - mask, h, top_w); - - dst += h * dst_stride; - pre += h * pre_stride; - break; - } - case PARTITION_VERT: { - const uint8_t *const mask = get_supertx_mask(w, ssx); - - w_remain = top_w - w_offset - w; - h_remain = top_h; - dst += w_offset; - pre += w_offset; - -#if CONFIG_HIGHBITDEPTH - if (is_hdb) - aom_highbd_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, - pre_stride, mask, top_h, w, xd->bd); - else -#endif // CONFIG_HIGHBITDEPTH - aom_blend_a64_hmask(dst, dst_stride, dst, dst_stride, pre, pre_stride, - mask, top_h, w); - - dst += w; - pre += w; - break; - } - default: { - assert(0); - return; - } - } - - if (w_remain == 0 || h_remain == 0) { - return; - } - -#if CONFIG_HIGHBITDEPTH - if (is_hdb) { - dst = (uint8_t *)CONVERT_TO_SHORTPTR(dst); - pre = (const uint8_t *)CONVERT_TO_SHORTPTR(pre); - dst_stride *= 2; - pre_stride *= 2; - w_remain *= 2; - } -#endif // CONFIG_HIGHBITDEPTH - - do { - memcpy(dst, pre, w_remain * sizeof(uint8_t)); - dst += dst_stride; - pre += pre_stride; - } while (--h_remain); -} - -void av1_build_inter_predictor_sb_sub8x8_extend(const AV1_COMMON *cm, - MACROBLOCKD *xd, int mi_row_ori, - int mi_col_ori, int mi_row, - int mi_col, int plane, - BLOCK_SIZE bsize, int block) { - // Prediction function used in supertx: - // Use the mv 
at current block (which is less than 8x8) - // to get prediction of a block located at (mi_row, mi_col) at size of bsize - // bsize can be larger than 8x8. - // block (0-3): the sub8x8 location of current block - const int mi_x = mi_col * MI_SIZE; - const int mi_y = mi_row * MI_SIZE; - const int wedge_offset_x = (mi_col_ori - mi_col) * MI_SIZE; - const int wedge_offset_y = (mi_row_ori - mi_row) * MI_SIZE; - - // For sub8x8 uv: - // Skip uv prediction in supertx except the first block (block = 0) - int max_plane = block ? 1 : MAX_MB_PLANE; - if (plane >= max_plane) return; - - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]); - const int num_4x4_w = num_4x4_blocks_wide_lookup[plane_bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[plane_bsize]; - const int bw = 4 * num_4x4_w; - const int bh = 4 * num_4x4_h; - - build_inter_predictors(cm, xd, plane, -#if CONFIG_MOTION_VAR - xd->mi[0], 0, -#endif // CONFIG_MOTION_VAR - block, bw, bh, 0, 0, bw, bh, wedge_offset_x, - wedge_offset_y, mi_x, mi_y); -} - -void av1_build_inter_predictor_sb_extend(const AV1_COMMON *cm, MACROBLOCKD *xd, - int mi_row_ori, int mi_col_ori, - int mi_row, int mi_col, int plane, - BLOCK_SIZE bsize) { - const int mi_x = mi_col * MI_SIZE; - const int mi_y = mi_row * MI_SIZE; - const int wedge_offset_x = (mi_col_ori - mi_col) * MI_SIZE; - const int wedge_offset_y = (mi_row_ori - mi_row) * MI_SIZE; - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, &xd->plane[plane]); - const int bw = block_size_wide[plane_bsize]; - const int bh = block_size_high[plane_bsize]; - - build_inter_predictors(cm, xd, plane, -#if CONFIG_MOTION_VAR - xd->mi[0], 0, -#endif // CONFIG_MOTION_VAR - 0, bw, bh, 0, 0, bw, bh, wedge_offset_x, - wedge_offset_y, mi_x, mi_y); -} -#endif // CONFIG_SUPERTX - #if CONFIG_MOTION_VAR // obmc_mask_N[overlap_position] static const uint8_t obmc_mask_1[1] = { 64 }; @@ -2294,11 +2005,7 @@ static INLINE void build_prediction_by_above_pred(MACROBLOCKD *xd, if (skip_u4x4_pred_in_obmc(bsize, pd, 0)) continue; build_inter_predictors(ctxt->cm, xd, j, above_mi, 1, 0, bw, bh, 0, 0, bw, - bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + bh, mi_x, mi_y); } *above_mbmi = backup_mbmi; } @@ -2395,9 +2102,6 @@ static INLINE void build_prediction_by_left_pred(MACROBLOCKD *xd, if (skip_u4x4_pred_in_obmc(bsize, pd, 1)) continue; build_inter_predictors(ctxt->cm, xd, j, left_mi, 1, 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX mi_x, mi_y); } *left_mbmi = backup_mbmi; @@ -2562,20 +2266,12 @@ void av1_build_prediction_by_bottom_preds(const AV1_COMMON *cm, MACROBLOCKD *xd, build_inter_predictors(cm, xd, j, mi, 1, y * 2 + x, bw, bh, (4 * x) >> pd->subsampling_x, xd->n8_h == 1 ? (4 >> pd->subsampling_y) : 0, - pw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + pw, bh, mi_x, mi_y); } } else { build_inter_predictors(cm, xd, j, mi, 1, 0, bw, bh, 0, xd->n8_h == 1 ? (4 >> pd->subsampling_y) : 0, bw, - bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + bh, mi_x, mi_y); } } *mbmi = backup_mbmi; @@ -2667,20 +2363,13 @@ void av1_build_prediction_by_right_preds(const AV1_COMMON *cm, MACROBLOCKD *xd, build_inter_predictors(cm, xd, j, mi, 1, y * 2 + x, bw, bh, xd->n8_w == 1 ? 
4 >> pd->subsampling_x : 0, - (4 * y) >> pd->subsampling_y, bw, ph, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + (4 * y) >> pd->subsampling_y, bw, ph, mi_x, + mi_y); } } else { build_inter_predictors(cm, xd, j, mi, 1, 0, bw, bh, xd->n8_w == 1 ? 4 >> pd->subsampling_x : 0, 0, - bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + bw, bh, mi_x, mi_y); } } *mbmi = backup_mbmi; @@ -3299,12 +2988,8 @@ void av1_build_inter_predictors_for_planes_single_buf( } static void build_wedge_inter_predictor_from_buf( - MACROBLOCKD *xd, int plane, int x, int y, int w, int h, -#if CONFIG_SUPERTX - int wedge_offset_x, int wedge_offset_y, -#endif // CONFIG_SUPERTX - uint8_t *ext_dst0, int ext_dst_stride0, uint8_t *ext_dst1, - int ext_dst_stride1) { + MACROBLOCKD *xd, int plane, int x, int y, int w, int h, uint8_t *ext_dst0, + int ext_dst_stride0, uint8_t *ext_dst1, int ext_dst_stride1) { MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; const int is_compound = has_second_ref(mbmi); MACROBLOCKD_PLANE *const pd = &xd->plane[plane]; @@ -3346,20 +3031,6 @@ static void build_wedge_inter_predictor_from_buf( } #endif // CONFIG_COMPOUND_SEGMENT -#if CONFIG_SUPERTX -#if CONFIG_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) - build_masked_compound_wedge_extend_highbd( - dst, dst_buf->stride, CONVERT_TO_BYTEPTR(ext_dst0), ext_dst_stride0, - CONVERT_TO_BYTEPTR(ext_dst1), ext_dst_stride1, &comp_data, - mbmi->sb_type, wedge_offset_x, wedge_offset_y, h, w, xd->bd); - else -#endif // CONFIG_HIGHBITDEPTH - build_masked_compound_wedge_extend( - dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1, - ext_dst_stride1, &comp_data, mbmi->sb_type, wedge_offset_x, - wedge_offset_y, h, w); -#else // !CONFIG_SUPERTX #if CONFIG_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) build_masked_compound_highbd( @@ -3371,7 +3042,6 @@ static void build_wedge_inter_predictor_from_buf( build_masked_compound(dst, dst_buf->stride, ext_dst0, ext_dst_stride0, ext_dst1, ext_dst_stride1, &comp_data, mbmi->sb_type, h, w); -#endif // CONFIG_SUPERTX } else { #if CONFIG_HIGHBITDEPTH if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) @@ -3385,13 +3055,12 @@ static void build_wedge_inter_predictor_from_buf( } } -void av1_build_wedge_inter_predictor_from_buf( - MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, -#if CONFIG_SUPERTX - int wedge_offset_x, int wedge_offset_y, -#endif // CONFIG_SUPERTX - uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3], - int ext_dst_stride1[3]) { +void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize, + int plane_from, int plane_to, + uint8_t *ext_dst0[3], + int ext_dst_stride0[3], + uint8_t *ext_dst1[3], + int ext_dst_stride1[3]) { int plane; for (plane = plane_from; plane <= plane_to; ++plane) { const BLOCK_SIZE plane_bsize = @@ -3405,22 +3074,14 @@ void av1_build_wedge_inter_predictor_from_buf( for (y = 0; y < num_4x4_h; ++y) for (x = 0; x < num_4x4_w; ++x) build_wedge_inter_predictor_from_buf( - xd, plane, 4 * x, 4 * y, 4, 4, -#if CONFIG_SUPERTX - wedge_offset_x, wedge_offset_y, -#endif // CONFIG_SUPERTX - ext_dst0[plane], ext_dst_stride0[plane], ext_dst1[plane], - ext_dst_stride1[plane]); + xd, plane, 4 * x, 4 * y, 4, 4, ext_dst0[plane], + ext_dst_stride0[plane], ext_dst1[plane], ext_dst_stride1[plane]); } else { const int bw = block_size_wide[plane_bsize]; const int bh = block_size_high[plane_bsize]; build_wedge_inter_predictor_from_buf( - xd, plane, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 
wedge_offset_x, wedge_offset_y, -#endif // CONFIG_SUPERTX - ext_dst0[plane], ext_dst_stride0[plane], ext_dst1[plane], - ext_dst_stride1[plane]); + xd, plane, 0, 0, bw, bh, ext_dst0[plane], ext_dst_stride0[plane], + ext_dst1[plane], ext_dst_stride1[plane]); } } } @@ -3635,9 +3296,6 @@ void get_pred_by_horz_neighbor(const AV1_COMMON *cm, MACROBLOCKD *xd, int bsize, bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y; build_inter_predictors(cm, xd, j, left_mi, 1, 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX mi_x, mi_y); } *left_mbmi = backup_mbmi; @@ -3730,9 +3388,6 @@ void get_pred_by_horz_neighbor(const AV1_COMMON *cm, MACROBLOCKD *xd, int bsize, bh = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y; build_inter_predictors(cm, xd, j, right_mi, 1, 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX mi_x, mi_y); } @@ -3849,9 +3504,6 @@ void get_pred_by_vert_neighbor(const AV1_COMMON *cm, MACROBLOCKD *xd, int bsize, bw = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y; build_inter_predictors(cm, xd, j, above_mi, 1, 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX mi_x, mi_y); } @@ -3949,9 +3601,6 @@ void get_pred_by_vert_neighbor(const AV1_COMMON *cm, MACROBLOCKD *xd, int bsize, bw = (mi_step << MI_SIZE_LOG2) >> pd->subsampling_y; build_inter_predictors(cm, xd, j, bottom_mi, 1, 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX mi_x, mi_y); } @@ -4059,9 +3708,6 @@ void get_pred_by_corner_neighbor(const AV1_COMMON *cm, MACROBLOCKD *xd, bh = mi_high << MI_SIZE_LOG2 >> (pd->subsampling_x + 1); bw = mi_wide << MI_SIZE_LOG2 >> (pd->subsampling_y + 1); build_inter_predictors(cm, xd, j, corner_mi, 1, 0, bw, bh, 0, 0, bw, bh, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX mi_x, mi_y); } *corner_mbmi = backup_mbmi; @@ -4119,13 +3765,9 @@ void av1_get_ori_blk_pred(const AV1_COMMON *cm, MACROBLOCKD *xd, int bsize, for (i = 0; i < MAX_MB_PLANE; ++i) { const struct macroblockd_plane *pd = &xd->plane[i]; - build_inter_predictors(cm, xd, i, mi, 1, 0, bw >> pd->subsampling_x, - bh >> pd->subsampling_y, 0, 0, - bw >> pd->subsampling_x, bh >> pd->subsampling_y, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - mi_x, mi_y); + build_inter_predictors( + cm, xd, i, mi, 1, 0, bw >> pd->subsampling_x, bh >> pd->subsampling_y, + 0, 0, bw >> pd->subsampling_x, bh >> pd->subsampling_y, mi_x, mi_y); } } diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h index 267f1b446..97cb0a61f 100644 --- a/av1/common/reconinter.h +++ b/av1/common/reconinter.h @@ -286,9 +286,6 @@ void av1_make_masked_inter_predictor( const int subpel_x, const int subpel_y, const struct scale_factors *sf, int w, int h, ConvolveParams *conv_params, InterpFilters interp_filters, int xs, int ys, -#if CONFIG_SUPERTX - int wedge_offset_x, int wedge_offset_y, -#endif // CONFIG_SUPERTX #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION || CONFIG_COMPOUND_SEGMENT int plane, #endif @@ -375,25 +372,6 @@ void av1_build_inter_predictors_sb(const AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row, int mi_col, BUFFER_SET *ctx, BLOCK_SIZE bsize); -#if CONFIG_SUPERTX -void av1_build_inter_predictor_sb_sub8x8_extend(const AV1_COMMON *cm, - MACROBLOCKD *xd, int mi_row_ori, - int mi_col_ori, int mi_row, - int mi_col, int plane, - BLOCK_SIZE bsize, int block); - -void av1_build_inter_predictor_sb_extend(const AV1_COMMON *cm, MACROBLOCKD *xd, - int mi_row_ori, int mi_col_ori, - int mi_row, int mi_col, int plane, - BLOCK_SIZE bsize); -struct 
macroblockd_plane; -void av1_build_masked_inter_predictor_complex( - MACROBLOCKD *xd, uint8_t *dst, int dst_stride, const uint8_t *pre, - int pre_stride, int mi_row, int mi_col, int mi_row_ori, int mi_col_ori, - BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, PARTITION_TYPE partition, - int plane); -#endif // CONFIG_SUPERTX - void av1_build_inter_predictor(const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride, const MV *src_mv, const struct scale_factors *sf, int w, int h, @@ -631,13 +609,12 @@ void av1_combine_interintra(MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane, void av1_build_inter_predictors_for_planes_single_buf( MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, int mi_row, int mi_col, int ref, uint8_t *ext_dst[3], int ext_dst_stride[3]); -void av1_build_wedge_inter_predictor_from_buf( - MACROBLOCKD *xd, BLOCK_SIZE bsize, int plane_from, int plane_to, -#if CONFIG_SUPERTX - int wedge_offset_x, int wedge_offset_y, -#endif // CONFIG_SUPERTX - uint8_t *ext_dst0[3], int ext_dst_stride0[3], uint8_t *ext_dst1[3], - int ext_dst_stride1[3]); +void av1_build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, BLOCK_SIZE bsize, + int plane_from, int plane_to, + uint8_t *ext_dst0[3], + int ext_dst_stride0[3], + uint8_t *ext_dst1[3], + int ext_dst_stride1[3]); #if CONFIG_NCOBMC_ADAPT_WEIGHT #define ASSIGN_ALIGNED_PTRS(p, a, s) \ diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c index f3a512687..bfa87a9b0 100644 --- a/av1/decoder/decodeframe.c +++ b/av1/decoder/decodeframe.c @@ -593,7 +593,7 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd, } #endif // CONFIG_VAR_TX -#if !CONFIG_VAR_TX || CONFIG_SUPERTX || CONFIG_COEF_INTERLEAVE || \ +#if !CONFIG_VAR_TX || CONFIG_COEF_INTERLEAVE || \ (!CONFIG_VAR_TX && CONFIG_EXT_TX && CONFIG_RECT_TX) static int reconstruct_inter_block(AV1_COMMON *cm, MACROBLOCKD *const xd, aom_reader *const r, int segment_id, @@ -690,900 +690,7 @@ static void set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd, mi_col); } -#if CONFIG_SUPERTX -static MB_MODE_INFO *set_offsets_extend(AV1_COMMON *const cm, - MACROBLOCKD *const xd, - const TileInfo *const tile, - BLOCK_SIZE bsize_pred, int mi_row_pred, - int mi_col_pred, int mi_row_ori, - int mi_col_ori) { - // Used in supertx - // (mi_row_ori, mi_col_ori): location for mv - // (mi_row_pred, mi_col_pred, bsize_pred): region to predict - const int bw = mi_size_wide[bsize_pred]; - const int bh = mi_size_high[bsize_pred]; - const int offset = mi_row_ori * cm->mi_stride + mi_col_ori; - xd->mi = cm->mi_grid_visible + offset; - xd->mi[0] = cm->mi + offset; - set_mi_row_col(xd, tile, mi_row_pred, bh, mi_col_pred, bw, -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - - xd->up_available = (mi_row_ori > tile->mi_row_start); - xd->left_available = (mi_col_ori > tile->mi_col_start); - - set_plane_n4(xd, bw, bh); - - return &xd->mi[0]->mbmi; -} - -#if CONFIG_SUPERTX -static MB_MODE_INFO *set_mb_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd, - BLOCK_SIZE bsize, int mi_row, int mi_col, - int bw, int bh, int x_mis, int y_mis) { - const int offset = mi_row * cm->mi_stride + mi_col; - const TileInfo *const tile = &xd->tile; - int x, y; - - xd->mi = cm->mi_grid_visible + offset; - xd->mi[0] = cm->mi + offset; - xd->mi[0]->mbmi.sb_type = bsize; - for (y = 0; y < y_mis; ++y) - for (x = !y; x < x_mis; ++x) xd->mi[y * cm->mi_stride + x] = xd->mi[0]; - - set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, -#if 
CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - return &xd->mi[0]->mbmi; -} -#endif - -static void set_offsets_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd, - const TileInfo *const tile, BLOCK_SIZE bsize, - int mi_row, int mi_col) { - const int bw = mi_size_wide[bsize]; - const int bh = mi_size_high[bsize]; - const int offset = mi_row * cm->mi_stride + mi_col; - - xd->mi = cm->mi_grid_visible + offset; - xd->mi[0] = cm->mi + offset; - - set_plane_n4(xd, bw, bh); - - set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - - av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row, - mi_col); -} - -static void set_param_topblock(AV1_COMMON *const cm, MACROBLOCKD *const xd, - BLOCK_SIZE bsize, int mi_row, int mi_col, - int txfm, int skip) { - const int bw = mi_size_wide[bsize]; - const int bh = mi_size_high[bsize]; - const int x_mis = AOMMIN(bw, cm->mi_cols - mi_col); - const int y_mis = AOMMIN(bh, cm->mi_rows - mi_row); - const int offset = mi_row * cm->mi_stride + mi_col; - int x, y; - - xd->mi = cm->mi_grid_visible + offset; - xd->mi[0] = cm->mi + offset; - - for (y = 0; y < y_mis; ++y) - for (x = 0; x < x_mis; ++x) { - xd->mi[y * cm->mi_stride + x]->mbmi.skip = skip; - xd->mi[y * cm->mi_stride + x]->mbmi.tx_type = txfm; - } -#if CONFIG_VAR_TX - xd->above_txfm_context = cm->above_txfm_context + mi_col; - xd->left_txfm_context = - xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK); - set_txfm_ctxs(xd->mi[0]->mbmi.tx_size, bw, bh, skip, xd); -#endif -} - -static void set_ref(AV1_COMMON *const cm, MACROBLOCKD *const xd, int idx, - int mi_row, int mi_col) { - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; -#if CONFIG_COMPOUND_SINGLEREF - RefBuffer *ref_buffer = - has_second_ref(mbmi) ? 
&cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME] - : &cm->frame_refs[mbmi->ref_frame[0] - LAST_FRAME]; -#else - RefBuffer *ref_buffer = &cm->frame_refs[mbmi->ref_frame[idx] - LAST_FRAME]; -#endif // CONFIG_COMPOUND_SINGLEREF - xd->block_refs[idx] = ref_buffer; - if (!av1_is_valid_scale(&ref_buffer->sf)) - aom_internal_error(&cm->error, AOM_CODEC_UNSUP_BITSTREAM, - "Invalid scale factors"); - av1_setup_pre_planes(xd, idx, ref_buffer->buf, mi_row, mi_col, - &ref_buffer->sf); - aom_merge_corrupted_flag(&xd->corrupted, ref_buffer->buf->corrupted); -} - -static void dec_predict_b_extend( - AV1Decoder *const pbi, MACROBLOCKD *const xd, const TileInfo *const tile, - int block, int mi_row_ori, int mi_col_ori, int mi_row_pred, int mi_col_pred, - int mi_row_top, int mi_col_top, int plane, uint8_t *dst_buf, int dst_stride, - BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, int b_sub8x8, int bextend) { - // Used in supertx - // (mi_row_ori, mi_col_ori): location for mv - // (mi_row_pred, mi_col_pred, bsize_pred): region to predict - // (mi_row_top, mi_col_top, bsize_top): region of the top partition size - // block: sub location of sub8x8 blocks - // b_sub8x8: 1: ori is sub8x8; 0: ori is not sub8x8 - // bextend: 1: region to predict is an extension of ori; 0: not - int r = (mi_row_pred - mi_row_top) * MI_SIZE; - int c = (mi_col_pred - mi_col_top) * MI_SIZE; - const int mi_width_top = mi_size_wide[bsize_top]; - const int mi_height_top = mi_size_high[bsize_top]; - MB_MODE_INFO *mbmi; - AV1_COMMON *const cm = &pbi->common; - - if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top || - mi_row_pred >= mi_row_top + mi_height_top || - mi_col_pred >= mi_col_top + mi_width_top || mi_row_pred >= cm->mi_rows || - mi_col_pred >= cm->mi_cols) - return; - - mbmi = set_offsets_extend(cm, xd, tile, bsize_pred, mi_row_pred, mi_col_pred, - mi_row_ori, mi_col_ori); - set_ref(cm, xd, 0, mi_row_pred, mi_col_pred); - if (has_second_ref(&xd->mi[0]->mbmi) -#if CONFIG_COMPOUND_SINGLEREF - || is_inter_singleref_comp_mode(xd->mi[0]->mbmi.mode) -#endif // CONFIG_COMPOUND_SINGLEREF - ) - set_ref(cm, xd, 1, mi_row_pred, mi_col_pred); - if (!bextend) mbmi->tx_size = max_txsize_lookup[bsize_top]; - - xd->plane[plane].dst.stride = dst_stride; - xd->plane[plane].dst.buf = - dst_buf + (r >> xd->plane[plane].subsampling_y) * dst_stride + - (c >> xd->plane[plane].subsampling_x); - - if (!b_sub8x8) - av1_build_inter_predictor_sb_extend(&pbi->common, xd, mi_row_ori, - mi_col_ori, mi_row_pred, mi_col_pred, - plane, bsize_pred); - else - av1_build_inter_predictor_sb_sub8x8_extend( - &pbi->common, xd, mi_row_ori, mi_col_ori, mi_row_pred, mi_col_pred, - plane, bsize_pred, block); -} - -static void dec_extend_dir(AV1Decoder *const pbi, MACROBLOCKD *const xd, - const TileInfo *const tile, int block, - BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, - int mi_row_ori, int mi_col_ori, int mi_row, - int mi_col, int mi_row_top, int mi_col_top, - int plane, uint8_t *dst_buf, int dst_stride, - int dir) { - // dir: 0-lower, 1-upper, 2-left, 3-right - // 4-lowerleft, 5-upperleft, 6-lowerright, 7-upperright - const int mi_width = mi_size_wide[bsize]; - const int mi_height = mi_size_high[bsize]; - int xss = xd->plane[1].subsampling_x; - int yss = xd->plane[1].subsampling_y; -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; -#endif - int b_sub8x8 = (bsize < BLOCK_8X8) && !unify_bsize ? 
1 : 0; - BLOCK_SIZE extend_bsize; - int mi_row_pred, mi_col_pred; - - int wide_unit, high_unit; - int i, j; - int ext_offset = 0; - - if (dir == 0 || dir == 1) { - extend_bsize = - (mi_width == mi_size_wide[BLOCK_8X8] || bsize < BLOCK_8X8 || xss < yss) - ? BLOCK_8X8 - : BLOCK_16X8; -#if CONFIG_CB4X4 - if (bsize < BLOCK_8X8) { - extend_bsize = BLOCK_4X4; - ext_offset = mi_size_wide[BLOCK_8X8]; - } -#endif - - wide_unit = mi_size_wide[extend_bsize]; - high_unit = mi_size_high[extend_bsize]; - - mi_row_pred = mi_row + ((dir == 0) ? mi_height : -(mi_height + ext_offset)); - mi_col_pred = mi_col; - - for (j = 0; j < mi_height + ext_offset; j += high_unit) - for (i = 0; i < mi_width + ext_offset; i += wide_unit) - dec_predict_b_extend(pbi, xd, tile, block, mi_row_ori, mi_col_ori, - mi_row_pred + j, mi_col_pred + i, mi_row_top, - mi_col_top, plane, dst_buf, dst_stride, top_bsize, - extend_bsize, b_sub8x8, 1); - } else if (dir == 2 || dir == 3) { - extend_bsize = - (mi_height == mi_size_high[BLOCK_8X8] || bsize < BLOCK_8X8 || yss < xss) - ? BLOCK_8X8 - : BLOCK_8X16; -#if CONFIG_CB4X4 - if (bsize < BLOCK_8X8) { - extend_bsize = BLOCK_4X4; - ext_offset = mi_size_wide[BLOCK_8X8]; - } -#endif - - wide_unit = mi_size_wide[extend_bsize]; - high_unit = mi_size_high[extend_bsize]; - - mi_row_pred = mi_row; - mi_col_pred = mi_col + ((dir == 3) ? mi_width : -(mi_width + ext_offset)); - - for (j = 0; j < mi_height + ext_offset; j += high_unit) - for (i = 0; i < mi_width + ext_offset; i += wide_unit) - dec_predict_b_extend(pbi, xd, tile, block, mi_row_ori, mi_col_ori, - mi_row_pred + j, mi_col_pred + i, mi_row_top, - mi_col_top, plane, dst_buf, dst_stride, top_bsize, - extend_bsize, b_sub8x8, 1); - } else { - extend_bsize = BLOCK_8X8; -#if CONFIG_CB4X4 - if (bsize < BLOCK_8X8) { - extend_bsize = BLOCK_4X4; - ext_offset = mi_size_wide[BLOCK_8X8]; - } -#endif - wide_unit = mi_size_wide[extend_bsize]; - high_unit = mi_size_high[extend_bsize]; - - mi_row_pred = mi_row + ((dir == 4 || dir == 6) ? mi_height - : -(mi_height + ext_offset)); - mi_col_pred = - mi_col + ((dir == 6 || dir == 7) ? 
mi_width : -(mi_width + ext_offset)); - - for (j = 0; j < mi_height + ext_offset; j += high_unit) - for (i = 0; i < mi_width + ext_offset; i += wide_unit) - dec_predict_b_extend(pbi, xd, tile, block, mi_row_ori, mi_col_ori, - mi_row_pred + j, mi_col_pred + i, mi_row_top, - mi_col_top, plane, dst_buf, dst_stride, top_bsize, - extend_bsize, b_sub8x8, 1); - } -} - -static void dec_extend_all(AV1Decoder *const pbi, MACROBLOCKD *const xd, - const TileInfo *const tile, int block, - BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, - int mi_row_ori, int mi_col_ori, int mi_row, - int mi_col, int mi_row_top, int mi_col_top, - int plane, uint8_t *dst_buf, int dst_stride) { - for (int i = 0; i < 8; ++i) { - dec_extend_dir(pbi, xd, tile, block, bsize, top_bsize, mi_row_ori, - mi_col_ori, mi_row, mi_col, mi_row_top, mi_col_top, plane, - dst_buf, dst_stride, i); - } -} - -static void dec_predict_sb_complex(AV1Decoder *const pbi, MACROBLOCKD *const xd, - const TileInfo *const tile, int mi_row, - int mi_col, int mi_row_top, int mi_col_top, - BLOCK_SIZE bsize, BLOCK_SIZE top_bsize, - uint8_t *dst_buf[3], int dst_stride[3]) { - const AV1_COMMON *const cm = &pbi->common; - const int hbs = mi_size_wide[bsize] / 2; - const PARTITION_TYPE partition = get_partition(cm, mi_row, mi_col, bsize); - const BLOCK_SIZE subsize = get_subsize(bsize, partition); -#if CONFIG_EXT_PARTITION_TYPES - const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT); -#endif - int i; - const int mi_offset = mi_row * cm->mi_stride + mi_col; - uint8_t *dst_buf1[3], *dst_buf2[3], *dst_buf3[3]; -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; -#endif - - DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_TX_SQUARE * 2]); - DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_TX_SQUARE * 2]); - DECLARE_ALIGNED(16, uint8_t, tmp_buf3[MAX_MB_PLANE * MAX_TX_SQUARE * 2]); - int dst_stride1[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE }; - int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE }; - int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE }; - -#if CONFIG_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - int len = sizeof(uint16_t); - dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1); - dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_TX_SQUARE * len); - dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + 2 * MAX_TX_SQUARE * len); - dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2); - dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_TX_SQUARE * len); - dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_TX_SQUARE * len); - dst_buf3[0] = CONVERT_TO_BYTEPTR(tmp_buf3); - dst_buf3[1] = CONVERT_TO_BYTEPTR(tmp_buf3 + MAX_TX_SQUARE * len); - dst_buf3[2] = CONVERT_TO_BYTEPTR(tmp_buf3 + 2 * MAX_TX_SQUARE * len); - } else { -#endif - dst_buf1[0] = tmp_buf1; - dst_buf1[1] = tmp_buf1 + MAX_TX_SQUARE; - dst_buf1[2] = tmp_buf1 + 2 * MAX_TX_SQUARE; - dst_buf2[0] = tmp_buf2; - dst_buf2[1] = tmp_buf2 + MAX_TX_SQUARE; - dst_buf2[2] = tmp_buf2 + 2 * MAX_TX_SQUARE; - dst_buf3[0] = tmp_buf3; - dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE; - dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE; -#if CONFIG_HIGHBITDEPTH - } -#endif - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - - xd->mi = cm->mi_grid_visible + mi_offset; - xd->mi[0] = cm->mi + mi_offset; - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - } - - switch (partition) { - case PARTITION_NONE: - assert(bsize < top_bsize); - for (i = 0; i < MAX_MB_PLANE; i++) { - 
dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, bsize, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - } - break; - case PARTITION_HORZ: - if (bsize == BLOCK_8X8 && !unify_bsize) { - for (i = 0; i < MAX_MB_PLANE; i++) { - // For sub8x8, predict in 8x8 unit - // First half - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, BLOCK_8X8, 1, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - - // Second half - dec_predict_b_extend(pbi, xd, tile, 2, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], top_bsize, BLOCK_8X8, 1, 1); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 2, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf1[i], dst_stride1[i]); - } - - // weighted average to smooth the boundary - xd->plane[0].dst.buf = dst_buf[0]; - xd->plane[0].dst.stride = dst_stride[0]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - 0); - } else { - for (i = 0; i < MAX_MB_PLANE; i++) { -#if CONFIG_CB4X4 - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - - if (handle_chroma_sub8x8) { - int mode_offset_row = CONFIG_CHROMA_SUB8X8 ? 
hbs : 0; - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + mode_offset_row, - mi_col, mi_row, mi_col, mi_row_top, mi_col_top, - i, dst_buf[i], dst_stride[i], top_bsize, bsize, - 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, bsize, top_bsize, - mi_row + mode_offset_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - } else { -#endif - // First half - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, - mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i], 0); - - if (mi_row + hbs < cm->mi_rows) { - // Second half - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, - mi_row + hbs, mi_col, mi_row_top, mi_col_top, - i, dst_buf1[i], dst_stride1[i], top_bsize, - subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, - mi_row + hbs, mi_col, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i]); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, - mi_row + hbs, mi_col, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], 1); - - // weighted average to smooth the boundary - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } -#if CONFIG_CB4X4 - } -#endif - } - } - break; - case PARTITION_VERT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - for (i = 0; i < MAX_MB_PLANE; i++) { - // First half - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, BLOCK_8X8, 1, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - - // Second half - dec_predict_b_extend(pbi, xd, tile, 1, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], top_bsize, BLOCK_8X8, 1, 1); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 1, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf1[i], dst_stride1[i]); - } - - // Smooth - xd->plane[0].dst.buf = dst_buf[0]; - xd->plane[0].dst.stride = dst_stride[0]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - 0); - } else { - for (i = 0; i < MAX_MB_PLANE; i++) { -#if CONFIG_CB4X4 - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - - if (handle_chroma_sub8x8) { - int mode_offset_col = CONFIG_CHROMA_SUB8X8 ? 
hbs : 0; - assert(i > 0 && bsize == BLOCK_8X8); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, - mi_col + mode_offset_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, bsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, bsize, top_bsize, mi_row, - mi_col + mode_offset_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - } else { -#endif - // First half - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, - mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i], 3); - - // Second half - if (mi_col + hbs < cm->mi_cols) { - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, - mi_row, mi_col + hbs, mi_row_top, mi_col_top, - i, dst_buf1[i], dst_stride1[i], top_bsize, - subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf1[i], dst_stride1[i]); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf1[i], dst_stride1[i], 2); - - // Smooth - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - } -#if CONFIG_CB4X4 - } -#endif - } - } - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - for (i = 0; i < MAX_MB_PLANE; i++) { - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, BLOCK_8X8, 1, 0); - dec_predict_b_extend(pbi, xd, tile, 1, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], top_bsize, BLOCK_8X8, 1, 1); - dec_predict_b_extend(pbi, xd, tile, 2, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf2[i], - dst_stride2[i], top_bsize, BLOCK_8X8, 1, 1); - dec_predict_b_extend(pbi, xd, tile, 3, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf3[i], - dst_stride3[i], top_bsize, BLOCK_8X8, 1, 1); - if (bsize < top_bsize) { - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - dec_extend_all(pbi, xd, tile, 1, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf1[i], dst_stride1[i]); - dec_extend_all(pbi, xd, tile, 2, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf2[i], dst_stride2[i]); - dec_extend_all(pbi, xd, tile, 3, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf3[i], dst_stride3[i]); - } - } -#if CONFIG_CB4X4 - } else if (bsize == BLOCK_8X8) { - for (i = 0; i < MAX_MB_PLANE; i++) { - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - - if (handle_chroma_sub8x8) { - int mode_offset_row = - 
CONFIG_CHROMA_SUB8X8 && mi_row + hbs < cm->mi_rows ? hbs : 0; - int mode_offset_col = - CONFIG_CHROMA_SUB8X8 && mi_col + hbs < cm->mi_cols ? hbs : 0; - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + mode_offset_row, - mi_col + mode_offset_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, BLOCK_8X8, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, BLOCK_8X8, top_bsize, - mi_row + mode_offset_row, mi_col + mode_offset_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - } else { - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, - mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, subsize, 0, 0); - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, - mi_row, mi_col + hbs, mi_row_top, mi_col_top, - i, dst_buf1[i], dst_stride1[i], top_bsize, - subsize, 0, 0); - if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, - mi_row + hbs, mi_col, mi_row_top, mi_col_top, - i, dst_buf2[i], dst_stride2[i], top_bsize, - subsize, 0, 0); - if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs, - mi_row + hbs, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf3[i], dst_stride3[i], - top_bsize, subsize, 0, 0); - - if (bsize < top_bsize) { - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col, mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i]); - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf1[i], dst_stride1[i]); - if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, - mi_row + hbs, mi_col, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, i, dst_buf2[i], - dst_stride2[i]); - if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, - mi_row + hbs, mi_col + hbs, mi_row + hbs, - mi_col + hbs, mi_row_top, mi_col_top, i, - dst_buf3[i], dst_stride3[i]); - } - } - } -#endif - } else { - dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row_top, - mi_col_top, subsize, top_bsize, dst_buf, - dst_stride); - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) - dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, subsize, top_bsize, - dst_buf1, dst_stride1); - if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) - dec_predict_sb_complex(pbi, xd, tile, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, subsize, top_bsize, - dst_buf2, dst_stride2); - if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) - dec_predict_sb_complex(pbi, xd, tile, mi_row + hbs, mi_col + hbs, - mi_row_top, mi_col_top, subsize, top_bsize, - dst_buf3, dst_stride3); - } - for (i = 0; i < MAX_MB_PLANE; i++) { -#if CONFIG_CB4X4 - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - if (handle_chroma_sub8x8) continue; // Skip <4x4 chroma smoothing -#else - if (bsize == BLOCK_8X8 && i != 0) - continue; // Skip <4x4 chroma smoothing -#endif - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], 
dst_stride[i], dst_buf1[i], dst_stride1[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - if (mi_row + hbs < cm->mi_rows) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } - } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } - } - break; -#if CONFIG_EXT_PARTITION_TYPES -#if CONFIG_EXT_PARTITION_TYPES_AB -#error HORZ/VERT_A/B partitions not yet updated in superres code -#endif - case PARTITION_HORZ_A: - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, - top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf1, - dst_stride1, top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, dst_buf1, dst_stride1); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf2, - dst_stride2, top_bsize, subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2, - 1); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - i); - } - break; - case PARTITION_VERT_A: - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, - top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf1, - dst_stride1, top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, dst_buf1, dst_stride1); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf2, - dst_stride2, top_bsize, subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf2, - dst_stride2); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row_top, 
mi_col_top, dst_buf2, - dst_stride2, 2); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - i); - } - break; - case PARTITION_HORZ_B: - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, - top_bsize, subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, 0); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf1, - dst_stride1, top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, dst_buf1, dst_stride1); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs, - mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top, - dst_buf2, dst_stride2, top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf2, - dst_stride2); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf1[i]; - xd->plane[i].dst.stride = dst_stride1[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - i); - } - break; - case PARTITION_VERT_B: - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, - top_bsize, subsize, 0, 0); - if (bsize < top_bsize) - dec_extend_all(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride); - else - dec_extend_dir(pbi, xd, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, 3); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf1, - dst_stride1, top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, dst_buf1, dst_stride1); - - dec_predict_b_extend(pbi, xd, tile, 0, mi_row + hbs, mi_col + hbs, - mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top, - dst_buf2, dst_stride2, top_bsize, bsize2, 0, 0); - dec_extend_all(pbi, xd, tile, 0, bsize2, top_bsize, mi_row + hbs, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf2, - dst_stride2); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf1[i]; - xd->plane[i].dst.stride = dst_stride1[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], 
dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - i); - } - break; -#endif // CONFIG_EXT_PARTITION_TYPES - default: assert(0); - } -} - -static void set_segment_id_supertx(const AV1_COMMON *const cm, int mi_row, - int mi_col, BLOCK_SIZE bsize) { - const struct segmentation *seg = &cm->seg; - const int miw = AOMMIN(mi_size_wide[bsize], cm->mi_cols - mi_col); - const int mih = AOMMIN(mi_size_high[bsize], cm->mi_rows - mi_row); - const int mi_offset = mi_row * cm->mi_stride + mi_col; - MODE_INFO **const mip = cm->mi_grid_visible + mi_offset; - int r, c; - int seg_id_supertx = MAX_SEGMENTS; - - if (!seg->enabled) { - seg_id_supertx = 0; - } else { - // Find the minimum segment_id - for (r = 0; r < mih; r++) - for (c = 0; c < miw; c++) - seg_id_supertx = - AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx); - assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS); - } - - // Assign the the segment_id back to segment_id_supertx - for (r = 0; r < mih; r++) - for (c = 0; c < miw; c++) - mip[r * cm->mi_stride + c]->mbmi.segment_id_supertx = seg_id_supertx; -} -#endif // CONFIG_SUPERTX - static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif // CONFIG_SUPERTX int mi_row, int mi_col, aom_reader *r, #if CONFIG_EXT_PARTITION_TYPES PARTITION_TYPE partition, @@ -1598,23 +705,11 @@ static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd, #if CONFIG_ACCOUNTING aom_accounting_set_context(&pbi->accounting, mi_col, mi_row); #endif -#if CONFIG_SUPERTX - if (supertx_enabled) { - set_mb_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis); - } else { - set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis); - } -#if CONFIG_EXT_PARTITION_TYPES - xd->mi[0]->mbmi.partition = partition; -#endif - av1_read_mode_info(pbi, xd, supertx_enabled, mi_row, mi_col, r, x_mis, y_mis); -#else set_offsets(cm, xd, bsize, mi_row, mi_col, bw, bh, x_mis, y_mis); #if CONFIG_EXT_PARTITION_TYPES xd->mi[0]->mbmi.partition = partition; #endif av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis); -#endif // CONFIG_SUPERTX if (bsize >= BLOCK_8X8 && (cm->subsampling_x || cm->subsampling_y)) { const BLOCK_SIZE uv_subsize = ss_size_lookup[bsize][cm->subsampling_x][cm->subsampling_y]; @@ -1623,10 +718,6 @@ static void decode_mbmi_block(AV1Decoder *const pbi, MACROBLOCKD *const xd, "Invalid block size."); } -#if CONFIG_SUPERTX - xd->mi[0]->mbmi.segment_id_supertx = MAX_SEGMENTS; -#endif // CONFIG_SUPERTX - int reader_corrupted_flag = aom_reader_has_error(r); aom_merge_corrupted_flag(&xd->corrupted, reader_corrupted_flag); } @@ -2175,29 +1266,19 @@ static void detoken_and_recon_sb(AV1Decoder *const pbi, MACROBLOCKD *const xd, #endif static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif // CONFIG_SUPERTX int mi_row, int mi_col, aom_reader *r, #if CONFIG_EXT_PARTITION_TYPES PARTITION_TYPE partition, #endif // CONFIG_EXT_PARTITION_TYPES BLOCK_SIZE bsize) { - decode_mbmi_block(pbi, xd, -#if CONFIG_SUPERTX - supertx_enabled, -#endif - mi_row, mi_col, r, + decode_mbmi_block(pbi, xd, mi_row, mi_col, 
r, #if CONFIG_EXT_PARTITION_TYPES partition, #endif bsize); #if !(CONFIG_MOTION_VAR && NC_MODE_INFO) -#if CONFIG_SUPERTX - if (!supertx_enabled) -#endif // CONFIG_SUPERTX - decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize); + decode_token_and_recon_block(pbi, xd, mi_row, mi_col, r, bsize); #endif } @@ -2248,31 +1329,8 @@ static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd, return p; } -#if CONFIG_SUPERTX -static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id, - aom_reader *r) { - if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) { - return 1; - } else { - const int ctx = av1_get_skip_context(xd); -#if CONFIG_NEW_MULTISYMBOL - FRAME_CONTEXT *ec_ctx = xd->tile_ctx; - const int skip = aom_read_symbol(r, ec_ctx->skip_cdfs[ctx], 2, ACCT_STR); -#else - const int skip = aom_read(r, cm->fc->skip_probs[ctx], ACCT_STR); -#endif - FRAME_COUNTS *counts = xd->counts; - if (counts) ++counts->skip[ctx][skip]; - return skip; - } -} -#endif // CONFIG_SUPERTX - // TODO(slavarnway): eliminate bsize and subsize in future commits static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif int mi_row, int mi_col, aom_reader *r, BLOCK_SIZE bsize) { AV1_COMMON *const cm = &pbi->common; @@ -2297,13 +1355,6 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd, #endif const int has_rows = (mi_row + hbs) < cm->mi_rows; const int has_cols = (mi_col + hbs) < cm->mi_cols; -#if CONFIG_SUPERTX - const int read_token = !supertx_enabled; - int skip = 0; - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - const TileInfo *const tile = &xd->tile; - int txfm = DCT_DCT; -#endif // CONFIG_SUPERTX if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; @@ -2325,25 +1376,8 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd, assert(partition < PARTITION_TYPES); assert(subsize < BLOCK_SIZES_ALL); #endif -#if CONFIG_SUPERTX - if (!frame_is_intra_only(cm) && partition != PARTITION_NONE && - bsize <= MAX_SUPERTX_BLOCK_SIZE && !supertx_enabled && !xd->lossless[0]) { - const int supertx_context = partition_supertx_context_lookup[partition]; - supertx_enabled = aom_read( - r, cm->fc->supertx_prob[supertx_context][supertx_size], ACCT_STR); - if (xd->counts) - xd->counts->supertx[supertx_context][supertx_size][supertx_enabled]++; -#if CONFIG_VAR_TX - if (supertx_enabled) xd->supertx_size = supertx_size; -#endif - } -#endif // CONFIG_SUPERTX -#if CONFIG_SUPERTX -#define DEC_BLOCK_STX_ARG supertx_enabled, -#else #define DEC_BLOCK_STX_ARG -#endif #if CONFIG_EXT_PARTITION_TYPES #define DEC_BLOCK_EPT_ARG partition, #else @@ -2449,108 +1483,6 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd, #undef DEC_BLOCK_EPT_ARG #undef DEC_BLOCK_STX_ARG -#if CONFIG_SUPERTX - if (supertx_enabled && read_token) { - uint8_t *dst_buf[3]; - int dst_stride[3], i; - int offset = mi_row * cm->mi_stride + mi_col; - - set_segment_id_supertx(cm, mi_row, mi_col, bsize); - - if (cm->delta_q_present_flag) { - for (i = 0; i < MAX_SEGMENTS; i++) { - int j; - for (j = 0; j < MAX_MB_PLANE; ++j) { - const int dc_delta_q = j == 0 ? cm->y_dc_delta_q : cm->uv_dc_delta_q; - const int ac_delta_q = j == 0 ? 
0 : cm->uv_ac_delta_q; - - xd->plane[j].seg_dequant[i][0] = - av1_dc_quant(xd->current_qindex, dc_delta_q, cm->bit_depth); - xd->plane[j].seg_dequant[i][1] = - av1_ac_quant(xd->current_qindex, ac_delta_q, cm->bit_depth); - } - } - } - - xd->mi = cm->mi_grid_visible + offset; - xd->mi[0] = cm->mi + offset; - set_mi_row_col(xd, tile, mi_row, mi_size_high[bsize], mi_col, - mi_size_wide[bsize], -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - set_skip_context(xd, mi_row, mi_col); - skip = read_skip(cm, xd, xd->mi[0]->mbmi.segment_id_supertx, r); - if (skip) { - av1_reset_skip_context(xd, mi_row, mi_col, bsize); - } else { - FRAME_CONTEXT *ec_ctx = xd->tile_ctx; -#if CONFIG_EXT_TX - if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) > - 1) { - const int eset = - get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used); - if (eset > 0) { - const TxSetType tx_set_type = get_ext_tx_set_type( - supertx_size, bsize, 1, cm->reduced_tx_set_used); - const int packed_sym = - aom_read_symbol(r, ec_ctx->inter_ext_tx_cdf[eset][supertx_size], - av1_num_ext_tx_set[tx_set_type], ACCT_STR); - txfm = av1_ext_tx_inv[tx_set_type][packed_sym]; -#if CONFIG_ENTROPY_STATS - if (xd->counts) ++xd->counts->inter_ext_tx[eset][supertx_size][txfm]; -#endif // CONFIG_ENTROPY_STATS - } - } -#else - if (supertx_size < TX_32X32) { - txfm = aom_read_symbol(r, ec_ctx->inter_ext_tx_cdf[supertx_size], - TX_TYPES, ACCT_STR); -#if CONFIG_ENTROPY_STATS - if (xd->counts) ++xd->counts->inter_ext_tx[supertx_size][txfm]; -#endif // CONFIG_ENTROPY_STATS - } -#endif // CONFIG_EXT_TX - } - - av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row, - mi_col); - for (i = 0; i < MAX_MB_PLANE; i++) { - dst_buf[i] = xd->plane[i].dst.buf; - dst_stride[i] = xd->plane[i].dst.stride; - } - dec_predict_sb_complex(pbi, xd, tile, mi_row, mi_col, mi_row, mi_col, bsize, - bsize, dst_buf, dst_stride); - - if (!skip) { - int eobtotal = 0; - MB_MODE_INFO *mbmi; - set_offsets_topblock(cm, xd, tile, bsize, mi_row, mi_col); - mbmi = &xd->mi[0]->mbmi; - mbmi->tx_type = txfm; - assert(mbmi->segment_id_supertx != MAX_SEGMENTS); - for (i = 0; i < MAX_MB_PLANE; ++i) { - const struct macroblockd_plane *const pd = &xd->plane[i]; - int row, col; - const TX_SIZE tx_size = av1_get_tx_size(i, xd); - const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd); - const int stepr = tx_size_high_unit[tx_size]; - const int stepc = tx_size_wide_unit[tx_size]; - const int max_blocks_wide = max_block_wide(xd, plane_bsize, i); - const int max_blocks_high = max_block_high(xd, plane_bsize, i); - - for (row = 0; row < max_blocks_high; row += stepr) - for (col = 0; col < max_blocks_wide; col += stepc) - eobtotal += reconstruct_inter_block( - cm, xd, r, mbmi->segment_id_supertx, i, row, col, tx_size); - } - if ((unify_bsize || !(subsize < BLOCK_8X8)) && eobtotal == 0) skip = 1; - } - set_param_topblock(cm, xd, bsize, mi_row, mi_col, txfm, skip); - } -#endif // CONFIG_SUPERTX - #if CONFIG_EXT_PARTITION_TYPES update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition); #else @@ -3951,11 +2883,8 @@ static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data, alloc_ncobmc_pred_buffer(&td->xd); set_sb_mi_boundaries(cm, &td->xd, mi_row, mi_col); #endif - decode_partition(pbi, &td->xd, -#if CONFIG_SUPERTX - 0, -#endif // CONFIG_SUPERTX - mi_row, mi_col, &td->bit_reader, cm->sb_size); + decode_partition(pbi, &td->xd, mi_row, mi_col, 
&td->bit_reader, + cm->sb_size); #if NC_MODE_INFO && CONFIG_MOTION_VAR detoken_and_recon_sb(pbi, &td->xd, mi_row, mi_col, &td->bit_reader, cm->sb_size); @@ -4134,11 +3063,8 @@ static int tile_worker_hook(TileWorkerData *const tile_data, for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end; mi_col += cm->mib_size) { - decode_partition(pbi, &tile_data->xd, -#if CONFIG_SUPERTX - 0, -#endif - mi_row, mi_col, &tile_data->bit_reader, cm->sb_size); + decode_partition(pbi, &tile_data->xd, mi_row, mi_col, + &tile_data->bit_reader, cm->sb_size); #if NC_MODE_INFO && CONFIG_MOTION_VAR detoken_and_recon_sb(pbi, &tile_data->xd, mi_row, mi_col, &tile_data->bit_reader, cm->sb_size); @@ -5218,19 +4144,6 @@ static size_t read_uncompressed_header(AV1Decoder *pbi, return sz; } -#if CONFIG_SUPERTX && !CONFIG_RESTRICT_COMPRESSED_HDR -static void read_supertx_probs(FRAME_CONTEXT *fc, aom_reader *r) { - int i, j; - if (aom_read(r, GROUP_DIFF_UPDATE_PROB, ACCT_STR)) { - for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) { - for (j = TX_8X8; j < TX_SIZES; ++j) { - av1_diff_update_prob(r, &fc->supertx_prob[i][j], ACCT_STR); - } - } - } -} -#endif // CONFIG_SUPERTX - static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, size_t partition_size) { #if CONFIG_RESTRICT_COMPRESSED_HDR @@ -5240,14 +4153,11 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, return 0; #else AV1_COMMON *const cm = &pbi->common; -#if CONFIG_SUPERTX - MACROBLOCKD *const xd = &pbi->mb; -#endif aom_reader r; #if ((CONFIG_RECT_TX_EXT && (CONFIG_EXT_TX || CONFIG_VAR_TX)) || \ (!CONFIG_NEW_MULTISYMBOL || CONFIG_LV_MAP) || \ - (CONFIG_COMPOUND_SINGLEREF || CONFIG_SUPERTX)) + (CONFIG_COMPOUND_SINGLEREF)) FRAME_CONTEXT *const fc = cm->fc; #endif @@ -5331,9 +4241,6 @@ static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data, #if CONFIG_AMVR } #endif -#endif -#if CONFIG_SUPERTX - if (!xd->lossless[0]) read_supertx_probs(fc, &r); #endif } diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c index a5f59c90b..5fff1b63c 100644 --- a/av1/decoder/decodemv.c +++ b/av1/decoder/decodemv.c @@ -947,9 +947,6 @@ static void read_intra_angle_info(MACROBLOCKD *const xd, aom_reader *r) { #endif // CONFIG_EXT_INTRA void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif #if CONFIG_TXK_SEL int blk_row, int blk_col, int block, int plane, TX_SIZE tx_size, @@ -986,9 +983,6 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, ((!cm->seg.enabled && cm->base_qindex > 0) || (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) && !mbmi->skip && -#if CONFIG_SUPERTX - !supertx_enabled && -#endif // CONFIG_SUPERTX !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { const TxSetType tx_set_type = get_ext_tx_set_type( tx_size, mbmi->sb_type, inter_block, cm->reduced_tx_set_used); @@ -1080,9 +1074,6 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, ((!cm->seg.enabled && cm->base_qindex > 0) || (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) && !mbmi->skip && -#if CONFIG_SUPERTX - !supertx_enabled && -#endif // CONFIG_SUPERTX !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { #if CONFIG_ENTROPY_STATS FRAME_COUNTS *counts = xd->counts; @@ -1172,11 +1163,7 @@ static void read_intrabc_info(AV1_COMMON *const cm, MACROBLOCKD *const xd, mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); #endif // CONFIG_VAR_TX #if CONFIG_EXT_TX && !CONFIG_TXK_SEL - av1_read_tx_type(cm, xd, -#if CONFIG_SUPERTX - 
0, -#endif - r); + av1_read_tx_type(cm, xd, r); #endif // CONFIG_EXT_TX && !CONFIG_TXK_SEL } } @@ -1327,11 +1314,7 @@ static void read_intra_frame_mode_info(AV1_COMMON *const cm, #endif // CONFIG_FILTER_INTRA #if !CONFIG_TXK_SEL - av1_read_tx_type(cm, xd, -#if CONFIG_SUPERTX - 0, -#endif - r); + av1_read_tx_type(cm, xd, r); #endif // !CONFIG_TXK_SEL } @@ -2280,13 +2263,8 @@ static void dec_dump_logs(AV1_COMMON *cm, MODE_INFO *const mi, int mi_row, static void read_inter_block_mode_info(AV1Decoder *const pbi, MACROBLOCKD *const xd, - MODE_INFO *const mi, -#if CONFIG_SUPERTX - int mi_row, int mi_col, aom_reader *r, - int supertx_enabled) { -#else - int mi_row, int mi_col, aom_reader *r) { -#endif // CONFIG_MOTION_VAR && CONFIG_SUPERTX + MODE_INFO *const mi, int mi_row, + int mi_col, aom_reader *r) { AV1_COMMON *const cm = &pbi->common; MB_MODE_INFO *const mbmi = &mi->mbmi; const BLOCK_SIZE bsize = mbmi->sb_type; @@ -2705,9 +2683,6 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, #if CONFIG_INTERINTRA mbmi->use_wedge_interintra = 0; if (cm->reference_mode != COMPOUND_REFERENCE && -#if CONFIG_SUPERTX - !supertx_enabled && -#endif cm->allow_interintra_compound && is_interintra_allowed(mbmi)) { const int bsize_group = size_group_lookup[bsize]; #if CONFIG_NEW_MULTISYMBOL @@ -2776,39 +2751,33 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, av1_count_overlappable_neighbors(cm, xd, mi_row, mi_col); #endif -#if CONFIG_SUPERTX - if (!supertx_enabled) { -#endif // CONFIG_SUPERTX - if (mbmi->ref_frame[1] != INTRA_FRAME) - mbmi->motion_mode = read_motion_mode(cm, xd, mi, r); + if (mbmi->ref_frame[1] != INTRA_FRAME) + mbmi->motion_mode = read_motion_mode(cm, xd, mi, r); #if CONFIG_NCOBMC_ADAPT_WEIGHT - read_ncobmc_mode(xd, mi, mbmi->ncobmc_mode, r); + read_ncobmc_mode(xd, mi, mbmi->ncobmc_mode, r); #endif #if CONFIG_COMPOUND_SINGLEREF - if (is_singleref_comp_mode) assert(mbmi->motion_mode == SIMPLE_TRANSLATION); + if (is_singleref_comp_mode) assert(mbmi->motion_mode == SIMPLE_TRANSLATION); #endif // CONFIG_COMPOUND_SINGLEREF #if CONFIG_WARPED_MOTION - if (mbmi->motion_mode == WARPED_CAUSAL) { - mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE; + if (mbmi->motion_mode == WARPED_CAUSAL) { + mbmi->wm_params[0].wmtype = DEFAULT_WMTYPE; #if CONFIG_EXT_WARPED_MOTION - if (mbmi->num_proj_ref[0] > 1) - mbmi->num_proj_ref[0] = sortSamples(pts_mv, &mbmi->mv[0].as_mv, pts, - pts_inref, mbmi->num_proj_ref[0]); + if (mbmi->num_proj_ref[0] > 1) + mbmi->num_proj_ref[0] = sortSamples(pts_mv, &mbmi->mv[0].as_mv, pts, + pts_inref, mbmi->num_proj_ref[0]); #endif // CONFIG_EXT_WARPED_MOTION - if (find_projection(mbmi->num_proj_ref[0], pts, pts_inref, bsize, - mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col, - &mbmi->wm_params[0], mi_row, mi_col)) { - aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid Warped Model"); - } + if (find_projection(mbmi->num_proj_ref[0], pts, pts_inref, bsize, + mbmi->mv[0].as_mv.row, mbmi->mv[0].as_mv.col, + &mbmi->wm_params[0], mi_row, mi_col)) { + aom_internal_error(&cm->error, AOM_CODEC_ERROR, "Invalid Warped Model"); } -#endif // CONFIG_WARPED_MOTION -#if CONFIG_SUPERTX } -#endif // CONFIG_SUPERTX +#endif // CONFIG_WARPED_MOTION #endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION mbmi->interinter_compound_type = COMPOUND_AVERAGE; @@ -2866,11 +2835,8 @@ static void read_inter_block_mode_info(AV1Decoder *const pbi, } static void read_inter_frame_mode_info(AV1Decoder *const pbi, - MACROBLOCKD *const xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif // 
CONFIG_SUPERTX - int mi_row, int mi_col, aom_reader *r) { + MACROBLOCKD *const xd, int mi_row, + int mi_col, aom_reader *r) { AV1_COMMON *const cm = &pbi->common; MODE_INFO *const mi = xd->mi[0]; MB_MODE_INFO *const mbmi = &mi->mbmi; @@ -2882,10 +2848,7 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi, mbmi->mv[0].as_int = 0; mbmi->mv[1].as_int = 0; mbmi->segment_id = read_inter_segment_id(cm, xd, mi_row, mi_col, r); -#if CONFIG_SUPERTX - if (!supertx_enabled) -#endif // CONFIG_SUPERTX - mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); + mbmi->skip = read_skip(cm, xd, mbmi->segment_id, r); if (cm->delta_q_present_flag) { xd->current_qindex = @@ -2925,111 +2888,85 @@ static void read_inter_frame_mode_info(AV1Decoder *const pbi, #endif } -#if CONFIG_SUPERTX - if (!supertx_enabled) { -#endif // CONFIG_SUPERTX - inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r); + inter_block = read_is_inter_block(cm, xd, mbmi->segment_id, r); #if CONFIG_VAR_TX - xd->above_txfm_context = - cm->above_txfm_context + (mi_col << TX_UNIT_WIDE_LOG2); - xd->left_txfm_context = xd->left_txfm_context_buffer + - ((mi_row & MAX_MIB_MASK) << TX_UNIT_HIGH_LOG2); + xd->above_txfm_context = + cm->above_txfm_context + (mi_col << TX_UNIT_WIDE_LOG2); + xd->left_txfm_context = xd->left_txfm_context_buffer + + ((mi_row & MAX_MIB_MASK) << TX_UNIT_HIGH_LOG2); - if (cm->tx_mode == TX_MODE_SELECT && + if (cm->tx_mode == TX_MODE_SELECT && #if CONFIG_CB4X4 - bsize > BLOCK_4X4 && + bsize > BLOCK_4X4 && #else - bsize >= BLOCK_8X8 && + bsize >= BLOCK_8X8 && #endif - !mbmi->skip && inter_block && !xd->lossless[mbmi->segment_id]) { - const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize]; - const int bh = tx_size_high_unit[max_tx_size]; - const int bw = tx_size_wide_unit[max_tx_size]; - const int width = block_size_wide[bsize] >> tx_size_wide_log2[0]; - const int height = block_size_high[bsize] >> tx_size_wide_log2[0]; - int idx, idy; - int init_depth = - (height != width) ? RECT_VARTX_DEPTH_INIT : SQR_VARTX_DEPTH_INIT; - - mbmi->min_tx_size = TX_SIZES_ALL; - for (idy = 0; idy < height; idy += bh) - for (idx = 0; idx < width; idx += bw) - read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size, init_depth, - idy, idx, r); + !mbmi->skip && inter_block && !xd->lossless[mbmi->segment_id]) { + const TX_SIZE max_tx_size = max_txsize_rect_lookup[bsize]; + const int bh = tx_size_high_unit[max_tx_size]; + const int bw = tx_size_wide_unit[max_tx_size]; + const int width = block_size_wide[bsize] >> tx_size_wide_log2[0]; + const int height = block_size_high[bsize] >> tx_size_wide_log2[0]; + int idx, idy; + int init_depth = + (height != width) ? 
RECT_VARTX_DEPTH_INIT : SQR_VARTX_DEPTH_INIT; + + mbmi->min_tx_size = TX_SIZES_ALL; + for (idy = 0; idy < height; idy += bh) + for (idx = 0; idx < width; idx += bw) + read_tx_size_vartx(cm, xd, mbmi, xd->counts, max_tx_size, init_depth, + idy, idx, r); #if CONFIG_RECT_TX_EXT - if (is_quarter_tx_allowed(xd, mbmi, inter_block) && - mbmi->tx_size == max_tx_size) { - int quarter_tx; + if (is_quarter_tx_allowed(xd, mbmi, inter_block) && + mbmi->tx_size == max_tx_size) { + int quarter_tx; - if (quarter_txsize_lookup[bsize] != max_tx_size) { + if (quarter_txsize_lookup[bsize] != max_tx_size) { #if CONFIG_NEW_MULTISYMBOL - quarter_tx = - aom_read_symbol(r, cm->fc->quarter_tx_size_cdf, 2, ACCT_STR); + quarter_tx = + aom_read_symbol(r, cm->fc->quarter_tx_size_cdf, 2, ACCT_STR); #else - quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR); - if (xd->counts) ++xd->counts->quarter_tx_size[quarter_tx]; + quarter_tx = aom_read(r, cm->fc->quarter_tx_size_prob, ACCT_STR); + if (xd->counts) ++xd->counts->quarter_tx_size[quarter_tx]; #endif - } else { - quarter_tx = 1; - } - if (quarter_tx) { - mbmi->tx_size = quarter_txsize_lookup[bsize]; - for (idy = 0; idy < tx_size_high_unit[max_tx_size] / 2; ++idy) - for (idx = 0; idx < tx_size_wide_unit[max_tx_size] / 2; ++idx) - mbmi->inter_tx_size[idy][idx] = mbmi->tx_size; - mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); - } + } else { + quarter_tx = 1; } + if (quarter_tx) { + mbmi->tx_size = quarter_txsize_lookup[bsize]; + for (idy = 0; idy < tx_size_high_unit[max_tx_size] / 2; ++idy) + for (idx = 0; idx < tx_size_wide_unit[max_tx_size] / 2; ++idx) + mbmi->inter_tx_size[idy][idx] = mbmi->tx_size; + mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); + } + } #endif - } else { - mbmi->tx_size = read_tx_size(cm, xd, inter_block, !mbmi->skip, r); + } else { + mbmi->tx_size = read_tx_size(cm, xd, inter_block, !mbmi->skip, r); - if (inter_block) { - const int width = block_size_wide[bsize] >> tx_size_wide_log2[0]; - const int height = block_size_high[bsize] >> tx_size_high_log2[0]; - int idx, idy; - for (idy = 0; idy < height; ++idy) - for (idx = 0; idx < width; ++idx) - mbmi->inter_tx_size[idy >> 1][idx >> 1] = mbmi->tx_size; - } - mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); - set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, mbmi->skip, xd); + if (inter_block) { + const int width = block_size_wide[bsize] >> tx_size_wide_log2[0]; + const int height = block_size_high[bsize] >> tx_size_high_log2[0]; + int idx, idy; + for (idy = 0; idy < height; ++idy) + for (idx = 0; idx < width; ++idx) + mbmi->inter_tx_size[idy >> 1][idx >> 1] = mbmi->tx_size; } + mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); + set_txfm_ctxs(mbmi->tx_size, xd->n8_w, xd->n8_h, mbmi->skip, xd); + } #else mbmi->tx_size = read_tx_size(cm, xd, inter_block, !mbmi->skip, r); #endif // CONFIG_VAR_TX -#if CONFIG_SUPERTX - } -#if CONFIG_VAR_TX - else if (inter_block) { - const int width = num_4x4_blocks_wide_lookup[bsize]; - const int height = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - xd->mi[0]->mbmi.tx_size = xd->supertx_size; - for (idy = 0; idy < height; ++idy) - for (idx = 0; idx < width; ++idx) - xd->mi[0]->mbmi.inter_tx_size[idy >> 1][idx >> 1] = xd->supertx_size; - } -#endif // CONFIG_VAR_TX -#endif // CONFIG_SUPERTX if (inter_block) - read_inter_block_mode_info(pbi, xd, -#if CONFIG_SUPERTX - mi, mi_row, mi_col, r, supertx_enabled); -#else - mi, mi_row, mi_col, r); -#endif // CONFIG_MOTION_VAR && CONFIG_SUPERTX + read_inter_block_mode_info(pbi, xd, mi, mi_row, 
mi_col, r); else read_intra_block_mode_info(cm, mi_row, mi_col, xd, mi, r); #if !CONFIG_TXK_SEL - av1_read_tx_type(cm, xd, -#if CONFIG_SUPERTX - supertx_enabled, -#endif - r); + av1_read_tx_type(cm, xd, r); #endif // !CONFIG_TXK_SEL } @@ -3060,12 +2997,8 @@ static void av1_intra_copy_frame_mvs(AV1_COMMON *const cm, int mi_row, } } -void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif // CONFIG_SUPERTX - int mi_row, int mi_col, aom_reader *r, int x_mis, - int y_mis) { +void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, int mi_row, + int mi_col, aom_reader *r, int x_mis, int y_mis) { AV1_COMMON *const cm = &pbi->common; MODE_INFO *const mi = xd->mi[0]; #if CONFIG_INTRABC @@ -3076,11 +3009,7 @@ void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r); av1_intra_copy_frame_mvs(cm, mi_row, mi_col, x_mis, y_mis); } else { - read_inter_frame_mode_info(pbi, xd, -#if CONFIG_SUPERTX - supertx_enabled, -#endif // CONFIG_SUPERTX - mi_row, mi_col, r); + read_inter_frame_mode_info(pbi, xd, mi_row, mi_col, r); av1_copy_frame_mvs(cm, mi, mi_row, mi_col, x_mis, y_mis); } } diff --git a/av1/decoder/decodemv.h b/av1/decoder/decodemv.h index 162cf3254..a4d383d52 100644 --- a/av1/decoder/decodemv.h +++ b/av1/decoder/decodemv.h @@ -21,9 +21,6 @@ extern "C" { #endif void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif int mi_row, int mi_col, aom_reader *r, int x_mis, int y_mis); @@ -33,9 +30,6 @@ void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, #endif void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif #if CONFIG_TXK_SEL int blk_row, int blk_col, int block, int plane, TX_SIZE tx_size, diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c index 7a67092ea..6185425ff 100644 --- a/av1/encoder/bitstream.c +++ b/av1/encoder/bitstream.c @@ -570,31 +570,6 @@ static void pack_map_tokens(aom_writer *w, const TOKENEXTRA **tp, int n, #endif // !CONFIG_PVQ #if !CONFIG_PVQ -#if CONFIG_SUPERTX -static void update_supertx_probs(AV1_COMMON *cm, int probwt, aom_writer *w) { - const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) - - av1_cost_zero(GROUP_DIFF_UPDATE_PROB); - int i, j; - int savings = 0; - int do_update = 0; - for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) { - for (j = TX_8X8; j < TX_SIZES; ++j) { - savings += av1_cond_prob_diff_update_savings( - &cm->fc->supertx_prob[i][j], cm->counts.supertx[i][j], probwt); - } - } - do_update = savings > savings_thresh; - aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB); - if (do_update) { - for (i = 0; i < PARTITION_SUPERTX_CONTEXTS; ++i) { - for (j = TX_8X8; j < TX_SIZES; ++j) { - av1_cond_prob_diff_update(w, &cm->fc->supertx_prob[i][j], - cm->counts.supertx[i][j], probwt); - } - } - } -} -#endif // CONFIG_SUPERTX #if !CONFIG_LV_MAP #if CONFIG_NEW_MULTISYMBOL @@ -1496,9 +1471,6 @@ static void write_palette_mode_info(const AV1_COMMON *cm, const MACROBLOCKD *xd, } void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd, -#if CONFIG_SUPERTX - const int supertx_enabled, -#endif #if CONFIG_TXK_SEL int blk_row, int blk_col, int block, int plane, TX_SIZE tx_size, @@ -1534,9 +1506,6 @@ void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd, ((!cm->seg.enabled && cm->base_qindex > 0) || (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) && !mbmi->skip && 
-#if CONFIG_SUPERTX - !supertx_enabled && -#endif // CONFIG_SUPERTX !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { #if CONFIG_MRC_TX if (tx_type == MRC_DCT) @@ -1595,14 +1564,11 @@ void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd, } #endif // CONFIG_LGT_FROM_PRED } -#else // CONFIG_EXT_TX +#else // CONFIG_EXT_TX if (tx_size < TX_32X32 && ((!cm->seg.enabled && cm->base_qindex > 0) || (cm->seg.enabled && xd->qindex[mbmi->segment_id] > 0)) && !mbmi->skip && -#if CONFIG_SUPERTX - !supertx_enabled && -#endif // CONFIG_SUPERTX !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { if (is_inter) { aom_write_symbol(w, av1_ext_tx_ind[tx_type], @@ -1651,11 +1617,7 @@ static void write_cfl_alphas(FRAME_CONTEXT *const ec_ctx, int idx, #endif static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, - const int mi_col, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif - aom_writer *w) { + const int mi_col, aom_writer *w) { AV1_COMMON *const cm = &cpi->common; MACROBLOCK *const x = &cpi->td.mb; MACROBLOCKD *const xd = &x->e_mbd; @@ -1697,14 +1659,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, } } -#if CONFIG_SUPERTX - if (supertx_enabled) - skip = mbmi->skip; - else - skip = write_skip(cm, xd, segment_id, mi, w); -#else skip = write_skip(cm, xd, segment_id, mi, w); -#endif // CONFIG_SUPERTX if (cm->delta_q_present_flag) { int super_block_upper_left = ((mi_row & MAX_MIB_MASK) == 0) && ((mi_col & MAX_MIB_MASK) == 0); @@ -1746,10 +1701,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, } } -#if CONFIG_SUPERTX - if (!supertx_enabled) -#endif // CONFIG_SUPERTX - write_is_inter(cm, xd, mbmi->segment_id, w, is_inter); + write_is_inter(cm, xd, mbmi->segment_id, w, is_inter); if (cm->tx_mode == TX_MODE_SELECT && #if CONFIG_CB4X4 && CONFIG_VAR_TX && !CONFIG_RECT_TX @@ -1757,9 +1709,6 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, #else block_signals_txsize(bsize) && #endif -#if CONFIG_SUPERTX - !supertx_enabled && -#endif // CONFIG_SUPERTX !(is_inter && skip) && !xd->lossless[segment_id]) { #if CONFIG_VAR_TX if (is_inter) { // This implies skip flag is 0. 
@@ -1993,9 +1942,6 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, #if CONFIG_INTERINTRA if (cpi->common.reference_mode != COMPOUND_REFERENCE && -#if CONFIG_SUPERTX - !supertx_enabled && -#endif // CONFIG_SUPERTX cpi->common.allow_interintra_compound && is_interintra_allowed(mbmi)) { const int interintra = mbmi->ref_frame[1] == INTRA_FRAME; const int bsize_group = size_group_lookup[bsize]; @@ -2027,10 +1973,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, #endif // CONFIG_INTERINTRA #if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION -#if CONFIG_SUPERTX - if (!supertx_enabled) -#endif // CONFIG_SUPERTX - if (mbmi->ref_frame[1] != INTRA_FRAME) write_motion_mode(cm, xd, mi, w); + if (mbmi->ref_frame[1] != INTRA_FRAME) write_motion_mode(cm, xd, mi, w); #if CONFIG_NCOBMC_ADAPT_WEIGHT write_ncobmc_mode(xd, mi, w); #endif @@ -2078,11 +2021,7 @@ static void pack_inter_mode_mvs(AV1_COMP *cpi, const int mi_row, } #if !CONFIG_TXK_SEL - av1_write_tx_type(cm, xd, -#if CONFIG_SUPERTX - supertx_enabled, -#endif - w); + av1_write_tx_type(cm, xd, w); #endif // !CONFIG_TXK_SEL } @@ -2101,11 +2040,7 @@ static void write_intrabc_info(AV1_COMMON *cm, MACROBLOCKD *xd, int_mv dv_ref = mbmi_ext->ref_mvs[INTRA_FRAME][0]; av1_encode_dv(w, &mbmi->mv[0].as_mv, &dv_ref.as_mv, &ec_ctx->ndvc); #if CONFIG_EXT_TX && !CONFIG_TXK_SEL - av1_write_tx_type(cm, xd, -#if CONFIG_SUPERTX - 0, -#endif - w); + av1_write_tx_type(cm, xd, w); #endif // CONFIG_EXT_TX && !CONFIG_TXK_SEL } } @@ -2234,23 +2169,13 @@ static void write_mb_modes_kf(AV1_COMMON *cm, MACROBLOCKD *xd, #endif // CONFIG_FILTER_INTRA #if !CONFIG_TXK_SEL - av1_write_tx_type(cm, xd, -#if CONFIG_SUPERTX - 0, -#endif - w); + av1_write_tx_type(cm, xd, w); #endif // !CONFIG_TXK_SEL } -#if CONFIG_SUPERTX -#define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \ - mi_row, mi_col) \ - write_modes_b(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col) -#else #define write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \ mi_row, mi_col) \ write_modes_b(cpi, tile, w, tok, tok_end, mi_row, mi_col) -#endif // CONFIG_SUPERTX #if CONFIG_RD_DEBUG static void dump_mode_info(MODE_INFO *mi) { @@ -2362,11 +2287,7 @@ static void enc_dump_logs(AV1_COMP *cpi, int mi_row, int mi_col) { #endif // ENC_MISMATCH_DEBUG static void write_mbmi_b(AV1_COMP *cpi, const TileInfo *const tile, - aom_writer *w, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif - int mi_row, int mi_col) { + aom_writer *w, int mi_row, int mi_col) { AV1_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; MODE_INFO *m; @@ -2416,11 +2337,7 @@ static void write_mbmi_b(AV1_COMP *cpi, const TileInfo *const tile, enc_dump_logs(cpi, mi_row, mi_col); #endif // ENC_MISMATCH_DEBUG - pack_inter_mode_mvs(cpi, mi_row, mi_col, -#if CONFIG_SUPERTX - supertx_enabled, -#endif - w); + pack_inter_mode_mvs(cpi, mi_row, mi_col, w); } } @@ -2795,25 +2712,15 @@ static void write_tokens_sb(AV1_COMP *cpi, const TileInfo *const tile, static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile, aom_writer *w, const TOKENEXTRA **tok, - const TOKENEXTRA *const tok_end, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif - int mi_row, int mi_col) { - write_mbmi_b(cpi, tile, w, -#if CONFIG_SUPERTX - supertx_enabled, -#endif - mi_row, mi_col); + const TOKENEXTRA *const tok_end, int mi_row, + int mi_col) { + write_mbmi_b(cpi, tile, w, mi_row, mi_col); #if CONFIG_MOTION_VAR && NC_MODE_INFO (void)tok; (void)tok_end; #else -#if !CONFIG_PVQ && 
CONFIG_SUPERTX - if (!supertx_enabled) -#endif - write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); + write_tokens_b(cpi, tile, w, tok, tok_end, mi_row, mi_col); #endif } @@ -2863,24 +2770,14 @@ static void write_partition(const AV1_COMMON *const cm, } } -#if CONFIG_SUPERTX -#define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \ - mi_row, mi_col, bsize) \ - write_modes_sb(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col, \ - bsize) -#else #define write_modes_sb_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, \ mi_row, mi_col, bsize) \ write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, bsize) -#endif // CONFIG_SUPERTX static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile, aom_writer *const w, const TOKENEXTRA **tok, - const TOKENEXTRA *const tok_end, -#if CONFIG_SUPERTX - int supertx_enabled, -#endif - int mi_row, int mi_col, BLOCK_SIZE bsize) { + const TOKENEXTRA *const tok_end, int mi_row, + int mi_col, BLOCK_SIZE bsize) { const AV1_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; const int hbs = mi_size_wide[bsize] / 2; @@ -2899,36 +2796,9 @@ static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile, const int unify_bsize = 0; #endif -#if CONFIG_SUPERTX - const int mi_offset = mi_row * cm->mi_stride + mi_col; - MB_MODE_INFO *mbmi; - const int pack_token = !supertx_enabled; - TX_SIZE supertx_size; -#endif - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; write_partition(cm, xd, hbs, mi_row, mi_col, partition, bsize, w); -#if CONFIG_SUPERTX - mbmi = &cm->mi_grid_visible[mi_offset]->mbmi; - xd->mi = cm->mi_grid_visible + mi_offset; - set_mi_row_col(xd, tile, mi_row, mi_size_high[bsize], mi_col, - mi_size_wide[bsize], -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - if (!supertx_enabled && !frame_is_intra_only(cm) && - partition != PARTITION_NONE && bsize <= MAX_SUPERTX_BLOCK_SIZE && - !xd->lossless[0]) { - aom_prob prob; - supertx_size = max_txsize_lookup[bsize]; - prob = cm->fc->supertx_prob[partition_supertx_context_lookup[partition]] - [supertx_size]; - supertx_enabled = (xd->mi[0]->mbmi.tx_size == supertx_size); - aom_write(w, supertx_enabled, prob); - } -#endif // CONFIG_SUPERTX if (subsize < BLOCK_8X8 && !unify_bsize) { write_modes_b_wrapper(cpi, tile, w, tok, tok_end, supertx_enabled, mi_row, mi_col); @@ -3054,89 +2924,6 @@ static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile, default: assert(0); } } -#if CONFIG_SUPERTX - if (partition != PARTITION_NONE && supertx_enabled && pack_token) { - int skip; - const int bsw = mi_size_wide[bsize]; - const int bsh = mi_size_high[bsize]; - - xd->mi = cm->mi_grid_visible + mi_offset; - supertx_size = mbmi->tx_size; - set_mi_row_col(xd, tile, mi_row, bsh, mi_col, bsw, -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - - assert(IMPLIES(!cm->seg.enabled, mbmi->segment_id_supertx == 0)); - assert(mbmi->segment_id_supertx < MAX_SEGMENTS); - - skip = write_skip(cm, xd, mbmi->segment_id_supertx, xd->mi[0], w); - - FRAME_CONTEXT *ec_ctx = xd->tile_ctx; - -#if CONFIG_EXT_TX - if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) > 1 && - !skip) { - const int eset = - get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used); - const int tx_set_type = - get_ext_tx_set_type(supertx_size, bsize, 1, cm->reduced_tx_set_used); - if 
(eset > 0) { - aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][mbmi->tx_type], - ec_ctx->inter_ext_tx_cdf[eset][supertx_size], - av1_num_ext_tx_set[tx_set_type]); - } - } -#else - if (supertx_size < TX_32X32 && !skip) { - aom_write_symbol(w, mbmi->tx_type, ec_ctx->inter_ext_tx_cdf[supertx_size], - TX_TYPES); - } -#endif // CONFIG_EXT_TX - - if (!skip) { - assert(*tok < tok_end); - for (int plane = 0; plane < MAX_MB_PLANE; ++plane) { -#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK - TX_TYPE tx_type = av1_get_tx_type(plane ? PLANE_TYPE_UV : PLANE_TYPE_Y, - xd, blk_row, blk_col, block, tx_size); -#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK - const struct macroblockd_plane *const pd = &xd->plane[plane]; - const int mbmi_txb_size = txsize_to_bsize[mbmi->tx_size]; - const BLOCK_SIZE plane_bsize = get_plane_block_size(mbmi_txb_size, pd); - - const int max_blocks_wide = max_block_wide(xd, plane_bsize, plane); - const int max_blocks_high = max_block_high(xd, plane_bsize, plane); - - int row, col; - const TX_SIZE tx = av1_get_tx_size(plane, xd); - BLOCK_SIZE txb_size = txsize_to_bsize[tx]; - - const int stepr = tx_size_high_unit[txb_size]; - const int stepc = tx_size_wide_unit[txb_size]; - - TOKEN_STATS token_stats; - token_stats.cost = 0; - for (row = 0; row < max_blocks_high; row += stepr) - for (col = 0; col < max_blocks_wide; col += stepc) - pack_mb_tokens(w, tok, tok_end, cm->bit_depth, tx, -#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK - tx_type, is_inter_block(mbmi), -#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK - &token_stats); - assert(*tok < tok_end && (*tok)->token == EOSB_TOKEN); - (*tok)++; - } - } -#if CONFIG_VAR_TX - xd->above_txfm_context = cm->above_txfm_context + mi_col; - xd->left_txfm_context = - xd->left_txfm_context_buffer + (mi_row & MAX_MIB_MASK); - set_txfm_ctxs(xd->mi[0]->mbmi.tx_size, bsw, bsh, skip, xd); -#endif - } -#endif // CONFIG_SUPERTX // update partition context #if CONFIG_EXT_PARTITION_TYPES @@ -5144,9 +4931,6 @@ static void write_uncompressed_header_obu(AV1_COMP *cpi, static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) { AV1_COMMON *const cm = &cpi->common; -#if CONFIG_SUPERTX - MACROBLOCKD *const xd = &cpi->td.mb.e_mbd; -#endif // CONFIG_SUPERTX FRAME_CONTEXT *const fc = cm->fc; aom_writer *header_bc; @@ -5271,9 +5055,6 @@ static uint32_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) { #if !CONFIG_NEW_MULTISYMBOL av1_write_nmv_probs(cm, cm->allow_high_precision_mv, header_bc, counts->mv); #endif -#if CONFIG_SUPERTX - if (!xd->lossless[0]) update_supertx_probs(cm, probwt, header_bc); -#endif // CONFIG_SUPERTX } aom_stop_encode(header_bc); assert(header_bc->pos <= 0xffff); diff --git a/av1/encoder/bitstream.h b/av1/encoder/bitstream.h index ec40a4ec2..7fe6f5329 100644 --- a/av1/encoder/bitstream.h +++ b/av1/encoder/bitstream.h @@ -36,9 +36,6 @@ static INLINE int av1_preserve_existing_gf(AV1_COMP *cpi) { } void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd, -#if CONFIG_SUPERTX - const int supertx_enabled, -#endif #if CONFIG_TXK_SEL int blk_row, int blk_col, int block, int plane, TX_SIZE tx_size, diff --git a/av1/encoder/context_tree.c b/av1/encoder/context_tree.c index 4bbf0e5fb..eb33cd561 100644 --- a/av1/encoder/context_tree.c +++ b/av1/encoder/context_tree.c @@ -130,24 +130,10 @@ static void alloc_tree_contexts(AV1_COMMON *cm, PC_TREE *tree, int num_pix) { &tree->horizontal4[i]); alloc_mode_context(cm, num_pix / 4, PARTITION_HORZ_4, &tree->vertical4[i]); } -#if CONFIG_SUPERTX - alloc_mode_context(cm, num_pix, 
PARTITION_HORZ, &tree->horizontal_supertx); - alloc_mode_context(cm, num_pix, PARTITION_VERT, &tree->vertical_supertx); - alloc_mode_context(cm, num_pix, PARTITION_SPLIT, &tree->split_supertx); - alloc_mode_context(cm, num_pix, PARTITION_HORZ_A, &tree->horizontala_supertx); - alloc_mode_context(cm, num_pix, PARTITION_HORZ_B, &tree->horizontalb_supertx); - alloc_mode_context(cm, num_pix, PARTITION_VERT_A, &tree->verticala_supertx); - alloc_mode_context(cm, num_pix, PARTITION_VERT_B, &tree->verticalb_supertx); -#endif // CONFIG_SUPERTX #else alloc_mode_context(cm, num_pix, &tree->none); alloc_mode_context(cm, num_pix / 2, &tree->horizontal[0]); alloc_mode_context(cm, num_pix / 2, &tree->vertical[0]); -#if CONFIG_SUPERTX - alloc_mode_context(cm, num_pix, &tree->horizontal_supertx); - alloc_mode_context(cm, num_pix, &tree->vertical_supertx); - alloc_mode_context(cm, num_pix, &tree->split_supertx); -#endif if (num_pix > 16) { alloc_mode_context(cm, num_pix / 2, &tree->horizontal[1]); @@ -178,17 +164,6 @@ static void free_tree_contexts(PC_TREE *tree) { free_mode_context(&tree->horizontal[1]); free_mode_context(&tree->vertical[0]); free_mode_context(&tree->vertical[1]); -#if CONFIG_SUPERTX - free_mode_context(&tree->horizontal_supertx); - free_mode_context(&tree->vertical_supertx); - free_mode_context(&tree->split_supertx); -#if CONFIG_EXT_PARTITION_TYPES - free_mode_context(&tree->horizontala_supertx); - free_mode_context(&tree->horizontalb_supertx); - free_mode_context(&tree->verticala_supertx); - free_mode_context(&tree->verticalb_supertx); -#endif // CONFIG_EXT_PARTITION_TYPES -#endif // CONFIG_SUPERTX } // This function sets up a tree of contexts such that at each square diff --git a/av1/encoder/context_tree.h b/av1/encoder/context_tree.h index 38052ba27..0c956cdf6 100644 --- a/av1/encoder/context_tree.h +++ b/av1/encoder/context_tree.h @@ -90,17 +90,6 @@ typedef struct PC_TREE { struct PC_TREE *split[4]; PICK_MODE_CONTEXT *leaf_split[4]; }; -#if CONFIG_SUPERTX - PICK_MODE_CONTEXT horizontal_supertx; - PICK_MODE_CONTEXT vertical_supertx; - PICK_MODE_CONTEXT split_supertx; -#if CONFIG_EXT_PARTITION_TYPES - PICK_MODE_CONTEXT horizontala_supertx; - PICK_MODE_CONTEXT horizontalb_supertx; - PICK_MODE_CONTEXT verticala_supertx; - PICK_MODE_CONTEXT verticalb_supertx; -#endif -#endif } PC_TREE; void av1_setup_pc_tree(struct AV1Common *cm, struct ThreadData *td); diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c index eefeeab4c..4c888ba0f 100644 --- a/av1/encoder/encodeframe.c +++ b/av1/encoder/encodeframe.c @@ -39,9 +39,6 @@ #include "av1/encoder/aq_complexity.h" #include "av1/encoder/aq_cyclicrefresh.h" #include "av1/encoder/aq_variance.h" -#if CONFIG_SUPERTX -#include "av1/encoder/cost.h" -#endif #if CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION #include "av1/common/warped_motion.h" #endif // CONFIG_GLOBAL_MOTION || CONFIG_WARPED_MOTION @@ -74,34 +71,6 @@ static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row, int mi_col, BLOCK_SIZE bsize, int *rate); -#if CONFIG_SUPERTX -static int check_intra_b(PICK_MODE_CONTEXT *ctx); - -static int check_intra_sb(const AV1_COMP *cpi, const TileInfo *const tile, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PC_TREE *pc_tree); -static void predict_superblock(const AV1_COMP *const cpi, ThreadData *td, - int mi_row_ori, int mi_col_ori, int mi_row_pred, - int mi_col_pred, int plane, - BLOCK_SIZE bsize_pred, int b_sub8x8, int block); -static int check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE 
supertx_size, - PC_TREE *pc_tree); -static void predict_sb_complex(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, - int mi_col, int mi_row_ori, int mi_col_ori, - RUN_TYPE dry_run, BLOCK_SIZE bsize, - BLOCK_SIZE top_bsize, uint8_t *dst_buf[3], - int dst_stride[3], PC_TREE *pc_tree); -static void update_state_sb_supertx(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, - int mi_col, BLOCK_SIZE bsize, - RUN_TYPE dry_run, PC_TREE *pc_tree); -static void rd_supertx_sb(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, int mi_col, - BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist, - TX_TYPE *best_tx, PC_TREE *pc_tree); -#endif // CONFIG_SUPERTX - // This is used as a reference when computing the source variance for the // purposes of activity masking. // Eventually this should be replaced by custom no-reference routines, @@ -340,116 +309,7 @@ static void set_offsets(const AV1_COMP *const cpi, const TileInfo *const tile, } else { mbmi->segment_id = 0; } - -#if CONFIG_SUPERTX - mbmi->segment_id_supertx = MAX_SEGMENTS; -#endif // CONFIG_SUPERTX -} - -#if CONFIG_SUPERTX -static void set_offsets_supertx(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, - int mi_col, BLOCK_SIZE bsize) { - MACROBLOCK *const x = &td->mb; - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - const int mi_width = mi_size_wide[bsize]; - const int mi_height = mi_size_high[bsize]; -#if CONFIG_DEPENDENT_HORZTILES - set_mode_info_offsets(cpi, x, xd, mi_row, mi_col, cm->dependent_horz_tiles); -#else - set_mode_info_offsets(cpi, x, xd, mi_row, mi_col); -#endif - - // Set up distance of MB to edge of frame in 1/8th pel units. - assert(!(mi_col & (mi_width - 1)) && !(mi_row & (mi_height - 1))); - set_mi_row_col(xd, tile, mi_row, mi_height, mi_col, mi_width, -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); -} - -static void set_offsets_extend(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row_pred, - int mi_col_pred, int mi_row_ori, int mi_col_ori, - BLOCK_SIZE bsize_pred) { - // Used in supertx - // (mi_row_ori, mi_col_ori, bsize_ori): region for mv - // (mi_row_pred, mi_col_pred, bsize_pred): region to predict - MACROBLOCK *const x = &td->mb; - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - const int mi_width = mi_size_wide[bsize_pred]; - const int mi_height = mi_size_high[bsize_pred]; - -#if CONFIG_DEPENDENT_HORZTILES - set_mode_info_offsets(cpi, x, xd, mi_row_ori, mi_col_ori, - cm->dependent_horz_tiles); -#else - set_mode_info_offsets(cpi, x, xd, mi_row_ori, mi_col_ori); -#endif - - // Set up limit values for MV components. - // Mv beyond the range do not produce new/different prediction block. - x->mv_limits.row_min = - -(((mi_row_pred + mi_height) * MI_SIZE) + AOM_INTERP_EXTEND); - x->mv_limits.col_min = - -(((mi_col_pred + mi_width) * MI_SIZE) + AOM_INTERP_EXTEND); - x->mv_limits.row_max = - (cm->mi_rows - mi_row_pred) * MI_SIZE + AOM_INTERP_EXTEND; - x->mv_limits.col_max = - (cm->mi_cols - mi_col_pred) * MI_SIZE + AOM_INTERP_EXTEND; - -// Set up distance of MB to edge of frame in 1/8th pel units. 
-#if !CONFIG_CB4X4 - assert(!(mi_col_pred & (mi_width - mi_size_wide[BLOCK_8X8])) && - !(mi_row_pred & (mi_height - mi_size_high[BLOCK_8X8]))); -#endif - set_mi_row_col(xd, tile, mi_row_pred, mi_height, mi_col_pred, mi_width, -#if CONFIG_DEPENDENT_HORZTILES - cm->dependent_horz_tiles, -#endif // CONFIG_DEPENDENT_HORZTILES - cm->mi_rows, cm->mi_cols); - xd->up_available = (mi_row_ori > tile->mi_row_start); - xd->left_available = (mi_col_ori > tile->mi_col_start); - - // R/D setup. - x->rdmult = cpi->rd.RDMULT; -} - -static void set_segment_id_supertx(const AV1_COMP *const cpi, - MACROBLOCK *const x, const int mi_row, - const int mi_col, const BLOCK_SIZE bsize) { - const AV1_COMMON *cm = &cpi->common; - const struct segmentation *seg = &cm->seg; - const int miw = AOMMIN(mi_size_wide[bsize], cm->mi_cols - mi_col); - const int mih = AOMMIN(mi_size_high[bsize], cm->mi_rows - mi_row); - const int mi_offset = mi_row * cm->mi_stride + mi_col; - MODE_INFO **const mip = cm->mi_grid_visible + mi_offset; - int r, c; - int seg_id_supertx = MAX_SEGMENTS; - - if (!seg->enabled) { - seg_id_supertx = 0; - } else { - // Find the minimum segment_id - for (r = 0; r < mih; r++) - for (c = 0; c < miw; c++) - seg_id_supertx = - AOMMIN(mip[r * cm->mi_stride + c]->mbmi.segment_id, seg_id_supertx); - assert(0 <= seg_id_supertx && seg_id_supertx < MAX_SEGMENTS); - - // Initialize plane quantisers - av1_init_plane_quantizers(cpi, x, seg_id_supertx); - } - - // Assign the the segment_id back to segment_id_supertx - for (r = 0; r < mih; r++) - for (c = 0; c < miw; c++) - mip[r * cm->mi_stride + c]->mbmi.segment_id_supertx = seg_id_supertx; } -#endif // CONFIG_SUPERTX #if CONFIG_DUAL_FILTER static void reset_intmv_filter_type(const AV1_COMMON *const cm, MACROBLOCKD *xd, @@ -593,9 +453,7 @@ static void update_state(const AV1_COMP *const cpi, ThreadData *td, int8_t rf_type; -#if !CONFIG_SUPERTX assert(mi->mbmi.sb_type == bsize); -#endif *mi_addr = *mi; *x->mbmi_ext = ctx->mbmi_ext; @@ -754,508 +612,121 @@ static void update_state(const AV1_COMP *const cpi, ThreadData *td, av1_copy_frame_mvs(cm, mi, mi_row, mi_col, x_mis, y_mis); } -#if CONFIG_SUPERTX -static void update_state_supertx(const AV1_COMP *const cpi, ThreadData *td, - PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, - BLOCK_SIZE bsize, RUN_TYPE dry_run) { - int y, x_idx; -#if CONFIG_VAR_TX - int i; -#endif - const AV1_COMMON *const cm = &cpi->common; - RD_COUNTS *const rdc = &td->rd_counts; +#if CONFIG_MOTION_VAR && NC_MODE_INFO +static void set_mode_info_b(const AV1_COMP *const cpi, + const TileInfo *const tile, ThreadData *td, + int mi_row, int mi_col, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx) { MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *mi = &ctx->mic; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - MODE_INFO *mi_addr = xd->mi[0]; - const struct segmentation *const seg = &cm->seg; - const int mis = cm->mi_stride; - const int mi_width = mi_size_wide[bsize]; - const int mi_height = mi_size_high[bsize]; - const int unify_bsize = CONFIG_CB4X4; - int8_t rf_type; - - *mi_addr = *mi; - *x->mbmi_ext = ctx->mbmi_ext; - assert(is_inter_block(mbmi)); - assert(mbmi->tx_size == ctx->mic.mbmi.tx_size); - -#if CONFIG_DUAL_FILTER - reset_intmv_filter_type(cm, xd, mbmi); -#endif - - rf_type = av1_ref_frame_type(mbmi->ref_frame); - if (x->mbmi_ext->ref_mv_count[rf_type] > 1 && - (mbmi->sb_type >= BLOCK_8X8 || unify_bsize)) { - set_ref_and_pred_mvs(x, mi->mbmi.pred_mv, rf_type); - } - - // If segmentation in use - if (seg->enabled) { - if 
(cpi->vaq_refresh) { - const int energy = - bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize); - mi_addr->mbmi.segment_id = av1_vaq_segment_id(energy); - } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) { - // For cyclic refresh mode, now update the segment map - // and set the segment id. - av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col, - bsize, ctx->rate, ctx->dist, 1); - } else { - // Otherwise just set the segment id based on the current segment map - const uint8_t *const map = - seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map; - mi_addr->mbmi.segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col); - } - mi_addr->mbmi.segment_id_supertx = MAX_SEGMENTS; - } - // Restore the coding context of the MB to that that was in place - // when the mode was picked for it - for (y = 0; y < mi_height; y++) - for (x_idx = 0; x_idx < mi_width; x_idx++) - if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > x_idx && - (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > y) { - xd->mi[x_idx + y * mis] = mi_addr; - } - -#if !CONFIG_CB4X4 - if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) { - mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int; - mbmi->mv[1].as_int = mi->bmi[3].as_mv[1].as_int; - } -#endif - - x->skip = ctx->skip; - -#if CONFIG_VAR_TX - for (i = 0; i < 1; ++i) - memcpy(x->blk_skip[i], ctx->blk_skip[i], - sizeof(uint8_t) * ctx->num_4x4_blk); - - if (!is_inter_block(mbmi) || mbmi->skip) - mbmi->min_tx_size = get_min_tx_size(mbmi->tx_size); -#endif // CONFIG_VAR_TX - -#if CONFIG_VAR_TX - { - const TX_SIZE mtx = mbmi->tx_size; - const int num_4x4_blocks_wide = tx_size_wide_unit[mtx] >> 1; - const int num_4x4_blocks_high = tx_size_high_unit[mtx] >> 1; - int idy, idx; - mbmi->inter_tx_size[0][0] = mtx; - for (idy = 0; idy < num_4x4_blocks_high; ++idy) - for (idx = 0; idx < num_4x4_blocks_wide; ++idx) - mbmi->inter_tx_size[idy][idx] = mtx; - } -#endif // CONFIG_VAR_TX - // Turn motion variation off for supertx - mbmi->motion_mode = SIMPLE_TRANSLATION; - - if (dry_run) return; - - if (!frame_is_intra_only(cm)) { - av1_update_mv_count(td); - -#if CONFIG_GLOBAL_MOTION - if (is_inter_block(mbmi)) { - if (bsize >= BLOCK_8X8) { - // TODO(sarahparker): global motion stats need to be handled per-tile - // to be compatible with tile-based threading. 
- update_global_motion_used(mbmi->mode, bsize, mbmi, rdc); - } else { - const int num_4x4_w = num_4x4_blocks_wide_lookup[bsize]; - const int num_4x4_h = num_4x4_blocks_high_lookup[bsize]; - int idx, idy; - for (idy = 0; idy < 2; idy += num_4x4_h) { - for (idx = 0; idx < 2; idx += num_4x4_w) { - const int j = idy * 2 + idx; - update_global_motion_used(mi->bmi[j].as_mode, bsize, mbmi, rdc); - } - } - } - } -#endif // CONFIG_GLOBAL_MOTION - - if (cm->interp_filter == SWITCHABLE -#if CONFIG_GLOBAL_MOTION - && !is_nontrans_global_motion(xd) -#endif // CONFIG_GLOBAL_MOTION - ) { -#if CONFIG_DUAL_FILTER - update_filter_type_count(td->counts, xd, mbmi); -#else - const int pred_ctx = av1_get_pred_context_switchable_interp(xd); - ++td->counts->switchable_interp[pred_ctx][mbmi->interp_filter]; -#endif - } - - rdc->comp_pred_diff[SINGLE_REFERENCE] += ctx->single_pred_diff; - rdc->comp_pred_diff[COMPOUND_REFERENCE] += ctx->comp_pred_diff; - rdc->comp_pred_diff[REFERENCE_MODE_SELECT] += ctx->hybrid_pred_diff; - } - - const int x_mis = AOMMIN(mi_width, cm->mi_cols - mi_col); - const int y_mis = AOMMIN(mi_height, cm->mi_rows - mi_row); - av1_copy_frame_mvs(cm, mi, mi_row, mi_col, x_mis, y_mis); + set_offsets(cpi, tile, x, mi_row, mi_col, bsize); + update_state(cpi, td, ctx, mi_row, mi_col, bsize, 1); } -static void update_state_sb_supertx(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, - int mi_col, BLOCK_SIZE bsize, - RUN_TYPE dry_run, PC_TREE *pc_tree) { +static void set_mode_info_sb(const AV1_COMP *const cpi, ThreadData *td, + const TileInfo *const tile, TOKENEXTRA **tp, + int mi_row, int mi_col, BLOCK_SIZE bsize, + PC_TREE *pc_tree) { const AV1_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - struct macroblock_plane *const p = x->plane; - struct macroblockd_plane *const pd = xd->plane; - int hbs = mi_size_wide[bsize] / 2; + const int hbs = mi_size_wide[bsize] / 2; + const PARTITION_TYPE partition = pc_tree->partitioning; + BLOCK_SIZE subsize = get_subsize(bsize, partition); +#if CONFIG_EXT_PARTITION_TYPES + const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT); + const int quarter_step = mi_size_wide[bsize] / 4; +#endif #if CONFIG_CB4X4 const int unify_bsize = 1; #else const int unify_bsize = 0; + assert(bsize >= BLOCK_8X8); #endif - PARTITION_TYPE partition = pc_tree->partitioning; - BLOCK_SIZE subsize = get_subsize(bsize, partition); - int i; -#if CONFIG_EXT_PARTITION_TYPES - BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT); -#endif - PICK_MODE_CONTEXT *pmc = NULL; if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - if (bsize == BLOCK_16X16 && cpi->vaq_refresh) - x->mb_energy = av1_block_energy(cpi, x, bsize); - switch (partition) { case PARTITION_NONE: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->none, mi_row, mi_col, subsize, - dry_run); + set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, &pc_tree->none); break; case PARTITION_VERT: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, - subsize, dry_run); + set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, + &pc_tree->vertical[0]); if (mi_col + hbs < cm->mi_cols && (bsize > BLOCK_8X8 || unify_bsize)) { - set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize); - update_state_supertx(cpi, td, &pc_tree->vertical[1], mi_row, - mi_col + hbs, subsize, dry_run); + 
set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, subsize, + &pc_tree->vertical[1]); } - pmc = &pc_tree->vertical_supertx; break; case PARTITION_HORZ: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->horizontal[0], mi_row, mi_col, - subsize, dry_run); + set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, + &pc_tree->horizontal[0]); if (mi_row + hbs < cm->mi_rows && (bsize > BLOCK_8X8 || unify_bsize)) { - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->horizontal[1], mi_row + hbs, - mi_col, subsize, dry_run); + set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, subsize, + &pc_tree->horizontal[1]); } - pmc = &pc_tree->horizontal_supertx; break; case PARTITION_SPLIT: if (bsize == BLOCK_8X8 && !unify_bsize) { - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_supertx(cpi, td, pc_tree->leaf_split[0], mi_row, mi_col, - subsize, dry_run); + set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, + pc_tree->leaf_split[0]); } else { - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, subsize, dry_run, - pc_tree->split[0]); - set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize); - update_state_sb_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize, - dry_run, pc_tree->split[1]); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize); - update_state_sb_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize, - dry_run, pc_tree->split[2]); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, subsize); - update_state_sb_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, - subsize, dry_run, pc_tree->split[3]); + set_mode_info_sb(cpi, td, tile, tp, mi_row, mi_col, subsize, + pc_tree->split[0]); + set_mode_info_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, subsize, + pc_tree->split[1]); + set_mode_info_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, subsize, + pc_tree->split[2]); + set_mode_info_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, subsize, + pc_tree->split[3]); } - pmc = &pc_tree->split_supertx; break; #if CONFIG_EXT_PARTITION_TYPES #if CONFIG_EXT_PARTITION_TYPES_AB -#error HORZ/VERT_A/B partitions not yet updated in superres code +#error NC_MODE_INFO+MOTION_VAR not yet supported for new HORZ/VERT_AB partitions #endif case PARTITION_HORZ_A: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, bsize2); - update_state_supertx(cpi, td, &pc_tree->horizontala[0], mi_row, mi_col, - bsize2, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, bsize2); - update_state_supertx(cpi, td, &pc_tree->horizontala[1], mi_row, - mi_col + hbs, bsize2, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->horizontala[2], mi_row + hbs, - mi_col, subsize, dry_run); - pmc = &pc_tree->horizontala_supertx; + set_mode_info_b(cpi, tile, td, mi_row, mi_col, bsize2, + &pc_tree->horizontala[0]); + set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, bsize2, + &pc_tree->horizontala[1]); + set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, subsize, + &pc_tree->horizontala[2]); break; case PARTITION_HORZ_B: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->horizontalb[0], mi_row, mi_col, - subsize, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, bsize2); - update_state_supertx(cpi, td, &pc_tree->horizontalb[1], mi_row + 
hbs, - mi_col, bsize2, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, bsize2); - update_state_supertx(cpi, td, &pc_tree->horizontalb[2], mi_row + hbs, - mi_col + hbs, bsize2, dry_run); - pmc = &pc_tree->horizontalb_supertx; + set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, + &pc_tree->horizontalb[0]); + set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, bsize2, + &pc_tree->horizontalb[1]); + set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col + hbs, bsize2, + &pc_tree->horizontalb[2]); break; case PARTITION_VERT_A: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, bsize2); - update_state_supertx(cpi, td, &pc_tree->verticala[0], mi_row, mi_col, - bsize2, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col, bsize2); - update_state_supertx(cpi, td, &pc_tree->verticala[1], mi_row + hbs, - mi_col, bsize2, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, subsize); - update_state_supertx(cpi, td, &pc_tree->verticala[2], mi_row, - mi_col + hbs, subsize, dry_run); - pmc = &pc_tree->verticala_supertx; + set_mode_info_b(cpi, tile, td, mi_row, mi_col, bsize2, + &pc_tree->verticala[0]); + set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, bsize2, + &pc_tree->verticala[1]); + set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, subsize, + &pc_tree->verticala[2]); break; case PARTITION_VERT_B: - set_offsets_supertx(cpi, td, tile, mi_row, mi_col, subsize); - update_state_supertx(cpi, td, &pc_tree->verticalb[0], mi_row, mi_col, - subsize, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row, mi_col + hbs, bsize2); - update_state_supertx(cpi, td, &pc_tree->verticalb[1], mi_row, - mi_col + hbs, bsize2, dry_run); - set_offsets_supertx(cpi, td, tile, mi_row + hbs, mi_col + hbs, bsize2); - update_state_supertx(cpi, td, &pc_tree->verticalb[2], mi_row + hbs, - mi_col + hbs, bsize2, dry_run); - pmc = &pc_tree->verticalb_supertx; + set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, + &pc_tree->verticalb[0]); + set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, bsize2, + &pc_tree->verticalb[1]); + set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col + hbs, bsize2, + &pc_tree->verticalb[2]); break; -#endif // CONFIG_EXT_PARTITION_TYPES - default: assert(0); - } + case PARTITION_HORZ_4: + for (int i = 0; i < 4; ++i) { + int this_mi_row = mi_row + i * quarter_step; + if (i > 0 && this_mi_row >= cm->mi_rows) break; - for (i = 0; i < MAX_MB_PLANE; ++i) { - if (pmc != NULL) { - p[i].coeff = pmc->coeff[i]; - p[i].qcoeff = pmc->qcoeff[i]; - pd[i].dqcoeff = pmc->dqcoeff[i]; - p[i].eobs = pmc->eobs[i]; - } else { - // These should never be used - p[i].coeff = NULL; - p[i].qcoeff = NULL; - pd[i].dqcoeff = NULL; - p[i].eobs = NULL; - } - } -} - -static void update_supertx_param(ThreadData *td, PICK_MODE_CONTEXT *ctx, - int best_tx, TX_SIZE supertx_size) { - MACROBLOCK *const x = &td->mb; -#if CONFIG_VAR_TX - int i; - - for (i = 0; i < 1; ++i) - memcpy(ctx->blk_skip[i], x->blk_skip[i], - sizeof(uint8_t) * ctx->num_4x4_blk); - ctx->mic.mbmi.min_tx_size = get_min_tx_size(supertx_size); -#endif // CONFIG_VAR_TX - ctx->mic.mbmi.tx_size = supertx_size; - ctx->skip = x->skip; - ctx->mic.mbmi.tx_type = best_tx; -} - -static void update_supertx_param_sb(const AV1_COMP *const cpi, ThreadData *td, - int mi_row, int mi_col, BLOCK_SIZE bsize, - int best_tx, TX_SIZE supertx_size, - PC_TREE *pc_tree) { - const AV1_COMMON *const cm = &cpi->common; - const int hbs = mi_size_wide[bsize] / 2; - PARTITION_TYPE partition = pc_tree->partitioning; - BLOCK_SIZE 
subsize = get_subsize(bsize, partition); -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; -#endif -#if CONFIG_EXT_PARTITION_TYPES - int i; -#endif - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - - switch (partition) { - case PARTITION_NONE: - update_supertx_param(td, &pc_tree->none, best_tx, supertx_size); - break; - case PARTITION_VERT: - update_supertx_param(td, &pc_tree->vertical[0], best_tx, supertx_size); - if (mi_col + hbs < cm->mi_cols && (bsize > BLOCK_8X8 || unify_bsize)) - update_supertx_param(td, &pc_tree->vertical[1], best_tx, supertx_size); - break; - case PARTITION_HORZ: - update_supertx_param(td, &pc_tree->horizontal[0], best_tx, supertx_size); - if (mi_row + hbs < cm->mi_rows && (bsize > BLOCK_8X8 || unify_bsize)) - update_supertx_param(td, &pc_tree->horizontal[1], best_tx, - supertx_size); - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - update_supertx_param(td, pc_tree->leaf_split[0], best_tx, supertx_size); - } else { - update_supertx_param_sb(cpi, td, mi_row, mi_col, subsize, best_tx, - supertx_size, pc_tree->split[0]); - update_supertx_param_sb(cpi, td, mi_row, mi_col + hbs, subsize, best_tx, - supertx_size, pc_tree->split[1]); - update_supertx_param_sb(cpi, td, mi_row + hbs, mi_col, subsize, best_tx, - supertx_size, pc_tree->split[2]); - update_supertx_param_sb(cpi, td, mi_row + hbs, mi_col + hbs, subsize, - best_tx, supertx_size, pc_tree->split[3]); - } - break; -#if CONFIG_EXT_PARTITION_TYPES -#if CONFIG_EXT_PARTITION_TYPES_AB -#error HORZ/VERT_A/B partitions not yet updated in superres code -#endif - case PARTITION_HORZ_A: - for (i = 0; i < 3; i++) - update_supertx_param(td, &pc_tree->horizontala[i], best_tx, - supertx_size); - break; - case PARTITION_HORZ_B: - for (i = 0; i < 3; i++) - update_supertx_param(td, &pc_tree->horizontalb[i], best_tx, - supertx_size); - break; - case PARTITION_VERT_A: - for (i = 0; i < 3; i++) - update_supertx_param(td, &pc_tree->verticala[i], best_tx, supertx_size); - break; - case PARTITION_VERT_B: - for (i = 0; i < 3; i++) - update_supertx_param(td, &pc_tree->verticalb[i], best_tx, supertx_size); - break; -#endif // CONFIG_EXT_PARTITION_TYPES - default: assert(0); - } -} -#endif // CONFIG_SUPERTX - -#if CONFIG_MOTION_VAR && NC_MODE_INFO -static void set_mode_info_b(const AV1_COMP *const cpi, - const TileInfo *const tile, ThreadData *td, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PICK_MODE_CONTEXT *ctx) { - MACROBLOCK *const x = &td->mb; - set_offsets(cpi, tile, x, mi_row, mi_col, bsize); - update_state(cpi, td, ctx, mi_row, mi_col, bsize, 1); -} - -static void set_mode_info_sb(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, TOKENEXTRA **tp, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PC_TREE *pc_tree) { - const AV1_COMMON *const cm = &cpi->common; - const int hbs = mi_size_wide[bsize] / 2; - const PARTITION_TYPE partition = pc_tree->partitioning; - BLOCK_SIZE subsize = get_subsize(bsize, partition); -#if CONFIG_EXT_PARTITION_TYPES - const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT); - const int quarter_step = mi_size_wide[bsize] / 4; -#endif -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; - assert(bsize >= BLOCK_8X8); -#endif - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - - switch (partition) { - case PARTITION_NONE: - set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, &pc_tree->none); - break; - case PARTITION_VERT: - set_mode_info_b(cpi, tile, td, 
mi_row, mi_col, subsize, - &pc_tree->vertical[0]); - if (mi_col + hbs < cm->mi_cols && (bsize > BLOCK_8X8 || unify_bsize)) { - set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, subsize, - &pc_tree->vertical[1]); - } - break; - case PARTITION_HORZ: - set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, - &pc_tree->horizontal[0]); - if (mi_row + hbs < cm->mi_rows && (bsize > BLOCK_8X8 || unify_bsize)) { - set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, subsize, - &pc_tree->horizontal[1]); - } - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, - pc_tree->leaf_split[0]); - } else { - set_mode_info_sb(cpi, td, tile, tp, mi_row, mi_col, subsize, - pc_tree->split[0]); - set_mode_info_sb(cpi, td, tile, tp, mi_row, mi_col + hbs, subsize, - pc_tree->split[1]); - set_mode_info_sb(cpi, td, tile, tp, mi_row + hbs, mi_col, subsize, - pc_tree->split[2]); - set_mode_info_sb(cpi, td, tile, tp, mi_row + hbs, mi_col + hbs, subsize, - pc_tree->split[3]); - } - break; -#if CONFIG_EXT_PARTITION_TYPES -#if CONFIG_EXT_PARTITION_TYPES_AB -#error NC_MODE_INFO+MOTION_VAR not yet supported for new HORZ/VERT_AB partitions -#endif - case PARTITION_HORZ_A: - set_mode_info_b(cpi, tile, td, mi_row, mi_col, bsize2, - &pc_tree->horizontala[0]); - set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, bsize2, - &pc_tree->horizontala[1]); - set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, subsize, - &pc_tree->horizontala[2]); - break; - case PARTITION_HORZ_B: - set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, - &pc_tree->horizontalb[0]); - set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, bsize2, - &pc_tree->horizontalb[1]); - set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col + hbs, bsize2, - &pc_tree->horizontalb[2]); - break; - case PARTITION_VERT_A: - set_mode_info_b(cpi, tile, td, mi_row, mi_col, bsize2, - &pc_tree->verticala[0]); - set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col, bsize2, - &pc_tree->verticala[1]); - set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, subsize, - &pc_tree->verticala[2]); - break; - case PARTITION_VERT_B: - set_mode_info_b(cpi, tile, td, mi_row, mi_col, subsize, - &pc_tree->verticalb[0]); - set_mode_info_b(cpi, tile, td, mi_row, mi_col + hbs, bsize2, - &pc_tree->verticalb[1]); - set_mode_info_b(cpi, tile, td, mi_row + hbs, mi_col + hbs, bsize2, - &pc_tree->verticalb[2]); - break; - case PARTITION_HORZ_4: - for (int i = 0; i < 4; ++i) { - int this_mi_row = mi_row + i * quarter_step; - if (i > 0 && this_mi_row >= cm->mi_rows) break; - - set_mode_info_b(cpi, tile, td, this_mi_row, mi_col, subsize, - &pc_tree->horizontal4[i]); - } - break; - case PARTITION_VERT_4: - for (int i = 0; i < 4; ++i) { - int this_mi_col = mi_col + i * quarter_step; - if (i > 0 && this_mi_col >= cm->mi_cols) break; + set_mode_info_b(cpi, tile, td, this_mi_row, mi_col, subsize, + &pc_tree->horizontal4[i]); + } + break; + case PARTITION_VERT_4: + for (int i = 0; i < 4; ++i) { + int this_mi_col = mi_col + i * quarter_step; + if (i > 0 && this_mi_col >= cm->mi_cols) break; set_mode_info_b(cpi, tile, td, mi_row, this_mi_col, subsize, &pc_tree->vertical4[i]); @@ -1388,9 +859,6 @@ static void dist_8x8_set_sub8x8_dst(MACROBLOCK *const x, uint8_t *dst8x8, static void rd_pick_sb_modes(const AV1_COMP *const cpi, TileDataEnc *tile_data, MACROBLOCK *const x, int mi_row, int mi_col, RD_STATS *rd_cost, -#if CONFIG_SUPERTX - int *totalrate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_TYPE partition, #endif @@ 
-1419,14 +887,6 @@ static void rd_pick_sb_modes(const AV1_COMP *const cpi, TileDataEnc *tile_data, mbmi->mi_row = mi_row; mbmi->mi_col = mi_col; #endif -#if CONFIG_SUPERTX - // We set tx_size here as skip blocks would otherwise not set it. - // tx_size needs to be set at this point as supertx_enable in - // write_modes_sb is computed based on this, and if the garbage in memory - // just happens to be the supertx_size, then the packer will code this - // block as a supertx block, even if rdopt did not pick it as such. - mbmi->tx_size = max_txsize_lookup[bsize]; -#endif #if CONFIG_EXT_PARTITION_TYPES mbmi->partition = partition; #endif @@ -1497,25 +957,13 @@ static void rd_pick_sb_modes(const AV1_COMP *const cpi, TileDataEnc *tile_data, // as a predictor for MBs that follow in the SB if (frame_is_intra_only(cm)) { av1_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd); -#if CONFIG_SUPERTX - *totalrate_nocoef = 0; -#endif // CONFIG_SUPERTX } else { if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) { av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, mi_row, mi_col, rd_cost, bsize, ctx, best_rd); -#if CONFIG_SUPERTX - *totalrate_nocoef = rd_cost->rate; -#endif // CONFIG_SUPERTX } else { av1_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost, -#if CONFIG_SUPERTX - totalrate_nocoef, -#endif // CONFIG_SUPERTX bsize, ctx, best_rd); -#if CONFIG_SUPERTX - assert(*totalrate_nocoef >= 0); -#endif // CONFIG_SUPERTX } } @@ -1569,12 +1017,7 @@ static void update_inter_mode_stats(FRAME_COUNTS *counts, PREDICTION_MODE mode, } static void update_stats(const AV1_COMMON *const cm, ThreadData *td, int mi_row, - int mi_col -#if CONFIG_SUPERTX - , - int supertx_enabled -#endif - ) { + int mi_col) { MACROBLOCK *x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; const MODE_INFO *const mi = xd->mi[0]; @@ -1648,10 +1091,7 @@ static void update_stats(const AV1_COMMON *const cm, ThreadData *td, int mi_row, const int seg_ref_active = segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME); if (!seg_ref_active) { -#if CONFIG_SUPERTX - if (!supertx_enabled) -#endif - counts->intra_inter[av1_get_intra_inter_context(xd)][inter_block]++; + counts->intra_inter[av1_get_intra_inter_context(xd)][inter_block]++; #if CONFIG_NEW_MULTISYMBOL update_cdf(fc->intra_inter_cdf[av1_get_intra_inter_context(xd)], inter_block, 2); @@ -1759,9 +1199,6 @@ static void update_stats(const AV1_COMMON *const cm, ThreadData *td, int mi_row, #if CONFIG_INTERINTRA if (cm->reference_mode != COMPOUND_REFERENCE && -#if CONFIG_SUPERTX - !supertx_enabled && -#endif cm->allow_interintra_compound && is_interintra_allowed(mbmi)) { const int bsize_group = size_group_lookup[bsize]; if (mbmi->ref_frame[1] == INTRA_FRAME) { @@ -1800,35 +1237,32 @@ static void update_stats(const AV1_COMMON *const cm, ThreadData *td, int mi_row, xd, #endif mi); -#if CONFIG_SUPERTX - if (!supertx_enabled) -#endif // CONFIG_SUPERTX - if (mbmi->ref_frame[1] != INTRA_FRAME) + if (mbmi->ref_frame[1] != INTRA_FRAME) #if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION - { - if (motion_allowed == WARPED_CAUSAL) { - counts->motion_mode[mbmi->sb_type][mbmi->motion_mode]++; - update_cdf(fc->motion_mode_cdf[mbmi->sb_type], mbmi->motion_mode, - MOTION_MODES); + { + if (motion_allowed == WARPED_CAUSAL) { + counts->motion_mode[mbmi->sb_type][mbmi->motion_mode]++; + update_cdf(fc->motion_mode_cdf[mbmi->sb_type], mbmi->motion_mode, + MOTION_MODES); #if CONFIG_NCOBMC_ADAPT_WEIGHT - } else if (motion_allowed == NCOBMC_ADAPT_WEIGHT) { - 
counts->ncobmc[mbmi->sb_type][mbmi->motion_mode]++; - update_cdf(fc->ncobmc_cdf[mbmi->sb_type], mbmi->motion_mode, - OBMC_FAMILY_MODES); - } else if (motion_allowed == OBMC_CAUSAL) { - counts->obmc[mbmi->sb_type][mbmi->motion_mode == OBMC_CAUSAL]++; - update_cdf(fc->obmc_cdf[mbmi->sb_type], mbmi->motion_mode, 2); - } + } else if (motion_allowed == NCOBMC_ADAPT_WEIGHT) { + counts->ncobmc[mbmi->sb_type][mbmi->motion_mode]++; + update_cdf(fc->ncobmc_cdf[mbmi->sb_type], mbmi->motion_mode, + OBMC_FAMILY_MODES); + } else if (motion_allowed == OBMC_CAUSAL) { + counts->obmc[mbmi->sb_type][mbmi->motion_mode == OBMC_CAUSAL]++; + update_cdf(fc->obmc_cdf[mbmi->sb_type], mbmi->motion_mode, 2); + } #else - } else if (motion_allowed == OBMC_CAUSAL) { - counts->obmc[mbmi->sb_type][mbmi->motion_mode == OBMC_CAUSAL]++; + } else if (motion_allowed == OBMC_CAUSAL) { + counts->obmc[mbmi->sb_type][mbmi->motion_mode == OBMC_CAUSAL]++; #if CONFIG_NEW_MULTISYMBOL - update_cdf(fc->obmc_cdf[mbmi->sb_type], - mbmi->motion_mode == OBMC_CAUSAL, 2); + update_cdf(fc->obmc_cdf[mbmi->sb_type], + mbmi->motion_mode == OBMC_CAUSAL, 2); #endif - } -#endif // CONFIG_NCOBMC_ADAPT_WEIGHT } +#endif // CONFIG_NCOBMC_ADAPT_WEIGHT + } #else if (motion_allowed > SIMPLE_TRANSLATION) { counts->motion_mode[mbmi->sb_type][mbmi->motion_mode]++; @@ -2138,11 +1572,7 @@ static void encode_b(const AV1_COMP *const cpi, const TileInfo *const tile, mbmi->current_delta_lf_from_base = xd->prev_delta_lf_from_base; } #endif -#if CONFIG_SUPERTX - update_stats(&cpi->common, td, mi_row, mi_col, 0); -#else update_stats(&cpi->common, td, mi_row, mi_col); -#endif } } @@ -2187,100 +1617,6 @@ static void encode_sb(const AV1_COMP *const cpi, ThreadData *td, if (!dry_run && ctx >= 0) td->counts->partition[ctx][partition]++; -#if CONFIG_SUPERTX - if (!frame_is_intra_only(cm) && bsize <= MAX_SUPERTX_BLOCK_SIZE && - partition != PARTITION_NONE && !xd->lossless[0]) { - int supertx_enabled; - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - supertx_enabled = check_supertx_sb(bsize, supertx_size, pc_tree); - if (supertx_enabled) { - const int mi_width = mi_size_wide[bsize]; - const int mi_height = mi_size_high[bsize]; - int x_idx, y_idx, i; - uint8_t *dst_buf[3]; - int dst_stride[3]; - set_skip_context(xd, mi_row, mi_col); - set_mode_info_offsets(cpi, x, xd, mi_row, mi_col); - update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize, dry_run, - pc_tree); - - av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row, - mi_col); - for (i = 0; i < MAX_MB_PLANE; i++) { - dst_buf[i] = xd->plane[i].dst.buf; - dst_stride[i] = xd->plane[i].dst.stride; - } - predict_sb_complex(cpi, td, tile, mi_row, mi_col, mi_row, mi_col, dry_run, - bsize, bsize, dst_buf, dst_stride, pc_tree); - - set_offsets_without_segment_id(cpi, tile, x, mi_row, mi_col, bsize); - set_segment_id_supertx(cpi, x, mi_row, mi_col, bsize); - - if (!x->skip) { - int this_rate = 0; - av1_encode_sb_supertx((AV1_COMMON *)cm, x, bsize); - av1_tokenize_sb_supertx(cpi, td, tp, dry_run, mi_row, mi_col, bsize, - rate); - if (rate) *rate += this_rate; - } else { - xd->mi[0]->mbmi.skip = 1; - if (!dry_run) td->counts->skip[av1_get_skip_context(xd)][1]++; - av1_reset_skip_context(xd, mi_row, mi_col, bsize); - } - if (!dry_run) { - for (y_idx = 0; y_idx < mi_height; y_idx++) - for (x_idx = 0; x_idx < mi_width; x_idx++) { - if ((xd->mb_to_right_edge >> (3 + MI_SIZE_LOG2)) + mi_width > - x_idx && - (xd->mb_to_bottom_edge >> (3 + MI_SIZE_LOG2)) + mi_height > - y_idx) { - xd->mi[x_idx + y_idx * 
cm->mi_stride]->mbmi.skip = - xd->mi[0]->mbmi.skip; - } - } - td->counts->supertx[partition_supertx_context_lookup[partition]] - [supertx_size][1]++; - td->counts->supertx_size[supertx_size]++; -#if CONFIG_ENTROPY_STATS -#if CONFIG_EXT_TX - if (get_ext_tx_types(supertx_size, bsize, 1, cm->reduced_tx_set_used) > - 1 && - !xd->mi[0]->mbmi.skip) { - const int eset = - get_ext_tx_set(supertx_size, bsize, 1, cm->reduced_tx_set_used); - if (eset > 0) { - ++td->counts - ->inter_ext_tx[eset][supertx_size][xd->mi[0]->mbmi.tx_type]; - } - } -#else - if (supertx_size < TX_32X32 && !xd->mi[0]->mbmi.skip) { - ++td->counts->inter_ext_tx[supertx_size][xd->mi[0]->mbmi.tx_type]; - } -#endif // CONFIG_EXT_TX -#endif // CONFIG_ENTROPY_STATS - } -#if CONFIG_EXT_PARTITION_TYPES - update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, - partition); -#else - if (partition != PARTITION_SPLIT || bsize == BLOCK_8X8) - update_partition_context(xd, mi_row, mi_col, subsize, bsize); -#endif -#if CONFIG_VAR_TX - set_txfm_ctxs(supertx_size, mi_width, mi_height, xd->mi[0]->mbmi.skip, - xd); -#endif // CONFIG_VAR_TX - return; - } else { - if (!dry_run) { - td->counts->supertx[partition_supertx_context_lookup[partition]] - [supertx_size][0]++; - } - } - } -#endif // CONFIG_SUPERTX - switch (partition) { case PARTITION_NONE: encode_b(cpi, tile, td, tp, mi_row, mi_col, dry_run, subsize, @@ -2522,9 +1858,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, TileDataEnc *tile_data, MODE_INFO **mib, TOKENEXTRA **tp, int mi_row, int mi_col, BLOCK_SIZE bsize, int *rate, int64_t *dist, -#if CONFIG_SUPERTX - int *rate_nocoef, -#endif int do_recon, PC_TREE *pc_tree) { AV1_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; @@ -2553,11 +1886,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, int do_partition_search = 1; PICK_MODE_CONTEXT *ctx_none = &pc_tree->none; const int unify_bsize = CONFIG_CB4X4; -#if CONFIG_SUPERTX - int last_part_rate_nocoef = INT_MAX; - int none_rate_nocoef = INT_MAX; - int chosen_rate_nocoef = INT_MAX; -#endif #if CONFIG_PVQ od_rollback_buffer pre_rdo_buf; #endif @@ -2611,9 +1939,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) { pc_tree->partitioning = PARTITION_NONE; rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &none_rdc, -#if CONFIG_SUPERTX - &none_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_NONE, #endif @@ -2622,9 +1947,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, if (none_rdc.rate < INT_MAX) { none_rdc.rate += x->partition_cost[pl][PARTITION_NONE]; none_rdc.rdcost = RDCOST(x->rdmult, none_rdc.rate, none_rdc.dist); -#if CONFIG_SUPERTX - none_rate_nocoef += x->partition_cost[pl][PARTITION_NONE]; -#endif } #if !CONFIG_PVQ @@ -2640,9 +1962,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, switch (partition) { case PARTITION_NONE: rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, -#if CONFIG_SUPERTX - &last_part_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_NONE, #endif @@ -2650,9 +1969,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, break; case PARTITION_HORZ: rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, -#if CONFIG_SUPERTX - &last_part_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_HORZ, #endif @@ -2660,42 +1976,27 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, if (last_part_rdc.rate != INT_MAX && bsize >= 
BLOCK_8X8 && mi_row + hbs < cm->mi_rows) { RD_STATS tmp_rdc; -#if CONFIG_SUPERTX - int rt_nocoef = 0; -#endif PICK_MODE_CONTEXT *ctx_h = &pc_tree->horizontal[0]; av1_init_rd_stats(&tmp_rdc); update_state(cpi, td, ctx_h, mi_row, mi_col, subsize, 1); encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row, mi_col, subsize, NULL); rd_pick_sb_modes(cpi, tile_data, x, mi_row + hbs, mi_col, &tmp_rdc, -#if CONFIG_SUPERTX - &rt_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_HORZ, #endif subsize, &pc_tree->horizontal[1], INT64_MAX); if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { av1_invalid_rd_stats(&last_part_rdc); -#if CONFIG_SUPERTX - last_part_rate_nocoef = INT_MAX; -#endif break; } last_part_rdc.rate += tmp_rdc.rate; last_part_rdc.dist += tmp_rdc.dist; last_part_rdc.rdcost += tmp_rdc.rdcost; -#if CONFIG_SUPERTX - last_part_rate_nocoef += rt_nocoef; -#endif } break; case PARTITION_VERT: rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, -#if CONFIG_SUPERTX - &last_part_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_VERT, #endif @@ -2703,18 +2004,12 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, if (last_part_rdc.rate != INT_MAX && bsize >= BLOCK_8X8 && mi_col + hbs < cm->mi_cols) { RD_STATS tmp_rdc; -#if CONFIG_SUPERTX - int rt_nocoef = 0; -#endif PICK_MODE_CONTEXT *ctx_v = &pc_tree->vertical[0]; av1_init_rd_stats(&tmp_rdc); update_state(cpi, td, ctx_v, mi_row, mi_col, subsize, 1); encode_superblock(cpi, td, tp, DRY_RUN_NORMAL, mi_row, mi_col, subsize, NULL); rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + hbs, &tmp_rdc, -#if CONFIG_SUPERTX - &rt_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_VERT, #endif @@ -2722,25 +2017,16 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, INT64_MAX); if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { av1_invalid_rd_stats(&last_part_rdc); -#if CONFIG_SUPERTX - last_part_rate_nocoef = INT_MAX; -#endif break; } last_part_rdc.rate += tmp_rdc.rate; last_part_rdc.dist += tmp_rdc.dist; last_part_rdc.rdcost += tmp_rdc.rdcost; -#if CONFIG_SUPERTX - last_part_rate_nocoef += rt_nocoef; -#endif } break; case PARTITION_SPLIT: if (bsize == BLOCK_8X8 && !unify_bsize) { rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &last_part_rdc, -#if CONFIG_SUPERTX - &last_part_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_SPLIT, #endif @@ -2750,17 +2036,11 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, last_part_rdc.rate = 0; last_part_rdc.dist = 0; last_part_rdc.rdcost = 0; -#if CONFIG_SUPERTX - last_part_rate_nocoef = 0; -#endif for (i = 0; i < 4; i++) { int x_idx = (i & 1) * hbs; int y_idx = (i >> 1) * hbs; int jj = i >> 1, ii = i & 0x01; RD_STATS tmp_rdc; -#if CONFIG_SUPERTX - int rt_nocoef; -#endif if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols)) continue; @@ -2768,23 +2048,13 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, rd_use_partition(cpi, td, tile_data, mib + jj * hbs * cm->mi_stride + ii * hbs, tp, mi_row + y_idx, mi_col + x_idx, subsize, &tmp_rdc.rate, - &tmp_rdc.dist, -#if CONFIG_SUPERTX - &rt_nocoef, -#endif - i != 3, pc_tree->split[i]); + &tmp_rdc.dist, i != 3, pc_tree->split[i]); if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { av1_invalid_rd_stats(&last_part_rdc); -#if CONFIG_SUPERTX - last_part_rate_nocoef = INT_MAX; -#endif break; } last_part_rdc.rate += tmp_rdc.rate; last_part_rdc.dist += tmp_rdc.dist; -#if CONFIG_SUPERTX - last_part_rate_nocoef += rt_nocoef; -#endif } break; 
#if CONFIG_EXT_PARTITION_TYPES @@ -2802,9 +2072,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, last_part_rdc.rate += x->partition_cost[pl][partition]; last_part_rdc.rdcost = RDCOST(x->rdmult, last_part_rdc.rate, last_part_rdc.dist); -#if CONFIG_SUPERTX - last_part_rate_nocoef += x->partition_cost[pl][partition]; -#endif } if (do_partition_search && cpi->sf.adjust_partitioning_from_last_frame && @@ -2815,9 +2082,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, BLOCK_SIZE split_subsize = get_subsize(bsize, PARTITION_SPLIT); chosen_rdc.rate = 0; chosen_rdc.dist = 0; -#if CONFIG_SUPERTX - chosen_rate_nocoef = 0; -#endif #if !CONFIG_PVQ restore_context(x, &x_ctx, mi_row, mi_col, bsize); #else @@ -2830,9 +2094,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, int x_idx = (i & 1) * hbs; int y_idx = (i >> 1) * hbs; RD_STATS tmp_rdc; -#if CONFIG_SUPERTX - int rt_nocoef = 0; -#endif #if CONFIG_PVQ od_rollback_buffer buf; #endif @@ -2847,9 +2108,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, pc_tree->split[i]->partitioning = PARTITION_NONE; rd_pick_sb_modes(cpi, tile_data, x, mi_row + y_idx, mi_col + x_idx, &tmp_rdc, -#if CONFIG_SUPERTX - &rt_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_SPLIT, #endif @@ -2862,33 +2120,21 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, #endif if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) { av1_invalid_rd_stats(&chosen_rdc); -#if CONFIG_SUPERTX - chosen_rate_nocoef = INT_MAX; -#endif break; } chosen_rdc.rate += tmp_rdc.rate; chosen_rdc.dist += tmp_rdc.dist; -#if CONFIG_SUPERTX - chosen_rate_nocoef += rt_nocoef; -#endif if (i != 3) encode_sb(cpi, td, tile_info, tp, mi_row + y_idx, mi_col + x_idx, OUTPUT_ENABLED, split_subsize, pc_tree->split[i], NULL); chosen_rdc.rate += x->partition_cost[pl][PARTITION_NONE]; -#if CONFIG_SUPERTX - chosen_rate_nocoef += x->partition_cost[pl][PARTITION_SPLIT]; -#endif } if (chosen_rdc.rate < INT_MAX) { chosen_rdc.rate += x->partition_cost[pl][PARTITION_SPLIT]; chosen_rdc.rdcost = RDCOST(x->rdmult, chosen_rdc.rate, chosen_rdc.dist); -#if CONFIG_SUPERTX - chosen_rate_nocoef += x->partition_cost[pl][PARTITION_NONE]; -#endif } } @@ -2897,17 +2143,11 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, mib[0]->mbmi.sb_type = bsize; if (bsize >= BLOCK_8X8) pc_tree->partitioning = partition; chosen_rdc = last_part_rdc; -#if CONFIG_SUPERTX - chosen_rate_nocoef = last_part_rate_nocoef; -#endif } // If none was better set the partitioning to that. if (none_rdc.rdcost < chosen_rdc.rdcost) { if (bsize >= BLOCK_8X8) pc_tree->partitioning = PARTITION_NONE; chosen_rdc = none_rdc; -#if CONFIG_SUPERTX - chosen_rate_nocoef = none_rate_nocoef; -#endif } #if !CONFIG_PVQ @@ -2937,9 +2177,6 @@ static void rd_use_partition(AV1_COMP *cpi, ThreadData *td, *rate = chosen_rdc.rate; *dist = chosen_rdc.dist; -#if CONFIG_SUPERTX - *rate_nocoef = chosen_rate_nocoef; -#endif } /* clang-format off */ @@ -3228,20 +2465,11 @@ static int rd_try_subblock(const AV1_COMP *const cpi, ThreadData *td, int is_first, int is_last, int mi_row, int mi_col, BLOCK_SIZE subsize, RD_STATS *best_rdc, RD_STATS *sum_rdc, RD_STATS *this_rdc, -#if CONFIG_SUPERTX - int64_t best_rd, int *sum_rate_nocoef, - int *this_rate_nocoef, int *abort_flag, -#endif PARTITION_TYPE partition, PICK_MODE_CONTEXT *prev_ctx, PICK_MODE_CONTEXT *this_ctx) { -#if CONFIG_SUPERTX -#define RTS_X_RATE_NOCOEF_ARG ((is_first) ? 
sum_rate_nocoef : this_rate_nocoef), -#define RTS_MAX_RDCOST INT64_MAX -#else #define RTS_X_RATE_NOCOEF_ARG #define RTS_MAX_RDCOST best_rdc->rdcost -#endif MACROBLOCK *const x = &td->mb; @@ -3258,23 +2486,13 @@ static int rd_try_subblock(const AV1_COMP *const cpi, ThreadData *td, RTS_X_RATE_NOCOEF_ARG partition, subsize, this_ctx, rdcost_remaining); -#if CONFIG_SUPERTX - if (is_first) *abort_flag = sum_rdc->rdcost >= best_rd; -#endif - if (!is_first) { if (this_rdc->rate == INT_MAX) { sum_rdc->rdcost = INT64_MAX; -#if CONFIG_SUPERTX - *sum_rate_nocoef = INT_MAX; -#endif } else { sum_rdc->rate += this_rdc->rate; sum_rdc->dist += this_rdc->dist; sum_rdc->rdcost += this_rdc->rdcost; -#if CONFIG_SUPERTX - *sum_rate_nocoef += *this_rate_nocoef; -#endif } } @@ -3292,16 +2510,15 @@ static int rd_try_subblock(const AV1_COMP *const cpi, ThreadData *td, #undef RTS_MAX_RDCOST } -static void rd_test_partition3( - const AV1_COMP *const cpi, ThreadData *td, TileDataEnc *tile_data, - TOKENEXTRA **tp, PC_TREE *pc_tree, RD_STATS *best_rdc, - PICK_MODE_CONTEXT ctxs[3], PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, - BLOCK_SIZE bsize, PARTITION_TYPE partition, -#if CONFIG_SUPERTX - int64_t best_rd, int *best_rate_nocoef, RD_SEARCH_MACROBLOCK_CONTEXT *x_ctx, -#endif - int mi_row0, int mi_col0, BLOCK_SIZE subsize0, int mi_row1, int mi_col1, - BLOCK_SIZE subsize1, int mi_row2, int mi_col2, BLOCK_SIZE subsize2) { +static void rd_test_partition3(const AV1_COMP *const cpi, ThreadData *td, + TileDataEnc *tile_data, TOKENEXTRA **tp, + PC_TREE *pc_tree, RD_STATS *best_rdc, + PICK_MODE_CONTEXT ctxs[3], + PICK_MODE_CONTEXT *ctx, int mi_row, int mi_col, + BLOCK_SIZE bsize, PARTITION_TYPE partition, + int mi_row0, int mi_col0, BLOCK_SIZE subsize0, + int mi_row1, int mi_col1, BLOCK_SIZE subsize1, + int mi_row2, int mi_col2, BLOCK_SIZE subsize2) { MACROBLOCK *const x = &td->mb; MACROBLOCKD *const xd = &x->e_mbd; RD_STATS sum_rdc, this_rdc; @@ -3311,22 +2528,10 @@ static void rd_test_partition3( const int has_rows = mi_row + hbs < cm->mi_rows; const int has_cols = mi_col + hbs < cm->mi_cols; #endif // CONFIG_UNPOISON_PARTITION_CTX -#if CONFIG_SUPERTX || CONFIG_EXT_PARTITION_TYPES_AB +#if CONFIG_EXT_PARTITION_TYPES_AB const AV1_COMMON *const cm = &cpi->common; #endif -#if CONFIG_SUPERTX - TileInfo *const tile_info = &tile_data->tile_info; - int sum_rate_nocoef, this_rate_nocoef; - int abort_flag; - const int supertx_allowed = !frame_is_intra_only(cm) && - bsize <= MAX_SUPERTX_BLOCK_SIZE && - !xd->lossless[0]; - -#define RTP_STX_TRY_ARGS \ - best_rd, &sum_rate_nocoef, &this_rate_nocoef, &abort_flag, -#else #define RTP_STX_TRY_ARGS -#endif if (!rd_try_subblock(cpi, td, tile_data, tp, 1, 0, mi_row0, mi_col0, subsize0, best_rdc, &sum_rdc, &this_rdc, @@ -3354,43 +2559,7 @@ static void rd_test_partition3( RTP_STX_TRY_ARGS partition, &ctxs[1], &ctxs[2])) return; -#if CONFIG_SUPERTX - if (supertx_allowed && !abort_flag && sum_rdc.rdcost < INT64_MAX) { - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - const PARTITION_TYPE best_partition = pc_tree->partitioning; - pc_tree->partitioning = partition; - sum_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup[partition]] - [supertx_size], - 0); - sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); - - if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) { - TX_TYPE best_tx = DCT_DCT; - RD_STATS tmp_rdc = { sum_rate_nocoef, 0, 0 }; - - restore_context(x, x_ctx, mi_row, mi_col, bsize); - - rd_supertx_sb(cpi, td, tile_info, 
mi_row, mi_col, bsize, &tmp_rdc.rate, - &tmp_rdc.dist, &best_tx, pc_tree); - - tmp_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup[partition]] - [supertx_size], - 1); - tmp_rdc.rdcost = RDCOST(x->rdmult, tmp_rdc.rate, tmp_rdc.dist); - if (tmp_rdc.rdcost < sum_rdc.rdcost) { - sum_rdc = tmp_rdc; - update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx, - supertx_size, pc_tree); - } - } - - pc_tree->partitioning = best_partition; - } -#endif - - if (sum_rdc.rdcost >= best_rdc->rdcost) return; + if (sum_rdc.rdcost >= best_rdc->rdcost) return; int pl = partition_plane_context(xd, mi_row, mi_col, #if CONFIG_UNPOISON_PARTITION_CTX @@ -3399,16 +2568,9 @@ static void rd_test_partition3( bsize); sum_rdc.rate += x->partition_cost[pl][partition]; sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); -#if CONFIG_SUPERTX - sum_rate_nocoef += x->partition_cost[pl][partition]; -#endif if (sum_rdc.rdcost >= best_rdc->rdcost) return; -#if CONFIG_SUPERTX - *best_rate_nocoef = sum_rate_nocoef; - assert(*best_rate_nocoef >= 0); -#endif *best_rdc = sum_rdc; pc_tree->partitioning = partition; @@ -3470,11 +2632,8 @@ static int64_t dist_8x8_yuv(const AV1_COMP *const cpi, MACROBLOCK *const x, static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, TileDataEnc *tile_data, TOKENEXTRA **tp, int mi_row, int mi_col, BLOCK_SIZE bsize, - RD_STATS *rd_cost, -#if CONFIG_SUPERTX - int *rate_nocoef, -#endif - int64_t best_rd, PC_TREE *pc_tree) { + RD_STATS *rd_cost, int64_t best_rd, + PC_TREE *pc_tree) { const AV1_COMMON *const cm = &cpi->common; TileInfo *const tile_info = &tile_data->tile_info; MACROBLOCK *const x = &td->mb; @@ -3513,13 +2672,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, #endif // CONFIG_CB4X4 const int *partition_cost = pl >= 0 ? 
x->partition_cost[pl] : x->partition_cost[0]; -#if CONFIG_SUPERTX - int this_rate_nocoef, sum_rate_nocoef = 0, best_rate_nocoef = INT_MAX; - int abort_flag; - const int supertx_allowed = !frame_is_intra_only(cm) && bsize >= BLOCK_8X8 && - bsize <= MAX_SUPERTX_BLOCK_SIZE && - !xd->lossless[0]; -#endif // CONFIG_SUPERTX int do_rectangular_split = 1; #if CONFIG_EXT_PARTITION_TYPES && !CONFIG_EXT_PARTITION_TYPES_AB @@ -3697,9 +2849,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, // PARTITION_NONE if (partition_none_allowed) { rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &this_rdc, -#if CONFIG_SUPERTX - &this_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_NONE, #endif @@ -3711,9 +2860,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, : 0; this_rdc.rate += pt_cost; this_rdc.rdcost = RDCOST(x->rdmult, this_rdc.rate, this_rdc.dist); -#if CONFIG_SUPERTX - this_rate_nocoef += pt_cost; -#endif } if (this_rdc.rdcost < best_rdc.rdcost) { @@ -3727,10 +2873,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, num_pels_log2_lookup[bsize]; best_rdc = this_rdc; -#if CONFIG_SUPERTX - best_rate_nocoef = this_rate_nocoef; - assert(best_rate_nocoef >= 0); -#endif if (bsize_at_least_8x8) pc_tree->partitioning = PARTITION_NONE; // If all y, u, v transform blocks in this partition are skippable, and @@ -3807,11 +2949,7 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, // store estimated motion vector if (cpi->sf.adaptive_motion_search) store_pred_mv(x, ctx_none); -#if CONFIG_SUPERTX - int64_t temp_best_rdcost = INT64_MAX; -#else int64_t temp_best_rdcost = best_rdc.rdcost; -#endif // PARTITION_SPLIT // TODO(jingning): use the motion vectors given by the above search as @@ -3825,58 +2963,13 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, av1_extract_interp_filter(ctx_none->mic.mbmi.interp_filters, 0); rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, -#if CONFIG_SUPERTX - &sum_rate_nocoef, -#endif #if CONFIG_EXT_PARTITION_TYPES PARTITION_SPLIT, #endif subsize, pc_tree->leaf_split[0], temp_best_rdcost); if (sum_rdc.rate == INT_MAX) { sum_rdc.rdcost = INT64_MAX; -#if CONFIG_SUPERTX - sum_rate_nocoef = INT_MAX; -#endif - } -#if CONFIG_SUPERTX - if (supertx_allowed && sum_rdc.rdcost < INT64_MAX) { - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - const PARTITION_TYPE best_partition = pc_tree->partitioning; - - pc_tree->partitioning = PARTITION_SPLIT; - - sum_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup - [PARTITION_SPLIT]][supertx_size], - 0); - sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); - - if (is_inter_mode(pc_tree->leaf_split[0]->mic.mbmi.mode)) { - TX_TYPE best_tx = DCT_DCT; - RD_STATS tmp_rdc; - av1_init_rd_stats(&tmp_rdc); - tmp_rdc.rate = sum_rate_nocoef; - - restore_context(x, &x_ctx, mi_row, mi_col, bsize); - - rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, - &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree); - - tmp_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup - [PARTITION_SPLIT]][supertx_size], - 1); - tmp_rdc.rdcost = RDCOST(x->rdmult, tmp_rdc.rate, tmp_rdc.dist); - if (tmp_rdc.rdcost < sum_rdc.rdcost) { - sum_rdc = tmp_rdc; - update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx, - supertx_size, pc_tree); - } - } - - pc_tree->partitioning = best_partition; } -#endif // CONFIG_SUPERTX reached_last_index = 1; } else { int idx; 
@@ -3890,27 +2983,17 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx_none); pc_tree->split[idx]->index = idx; - rd_pick_partition(cpi, td, tile_data, tp, mi_row + y_idx, - mi_col + x_idx, subsize, &this_rdc, -#if CONFIG_SUPERTX - &this_rate_nocoef, -#endif - temp_best_rdcost - sum_rdc.rdcost, - pc_tree->split[idx]); + rd_pick_partition( + cpi, td, tile_data, tp, mi_row + y_idx, mi_col + x_idx, subsize, + &this_rdc, temp_best_rdcost - sum_rdc.rdcost, pc_tree->split[idx]); if (this_rdc.rate == INT_MAX) { sum_rdc.rdcost = INT64_MAX; -#if CONFIG_SUPERTX - sum_rate_nocoef = INT_MAX; -#endif // CONFIG_SUPERTX break; } else { sum_rdc.rate += this_rdc.rate; sum_rdc.dist += this_rdc.dist; sum_rdc.rdcost += this_rdc.rdcost; -#if CONFIG_SUPERTX - sum_rate_nocoef += this_rate_nocoef; -#endif // CONFIG_SUPERTX } } reached_last_index = (idx == 4); @@ -3926,46 +3009,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); } #endif // CONFIG_DIST_8X8 && CONFIG_CB4X4 - -#if CONFIG_SUPERTX - if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && reached_last_index) { - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - const PARTITION_TYPE best_partition = pc_tree->partitioning; - - pc_tree->partitioning = PARTITION_SPLIT; - - sum_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup - [PARTITION_SPLIT]][supertx_size], - 0); - sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); - - if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) { - TX_TYPE best_tx = DCT_DCT; - RD_STATS tmp_rdc; - av1_init_rd_stats(&tmp_rdc); - tmp_rdc.rate = sum_rate_nocoef; - - restore_context(x, &x_ctx, mi_row, mi_col, bsize); - - rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, - &tmp_rdc.rate, &tmp_rdc.dist, &best_tx, pc_tree); - - tmp_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup - [PARTITION_SPLIT]][supertx_size], - 1); - tmp_rdc.rdcost = RDCOST(x->rdmult, tmp_rdc.rate, tmp_rdc.dist); - if (tmp_rdc.rdcost < sum_rdc.rdcost) { - sum_rdc = tmp_rdc; - update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx, - supertx_size, pc_tree); - } - } - - pc_tree->partitioning = best_partition; - } -#endif // CONFIG_SUPERTX } #if CONFIG_CFL && CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG @@ -3976,18 +3019,10 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, if (reached_last_index && sum_rdc.rdcost < best_rdc.rdcost) { sum_rdc.rate += partition_cost[PARTITION_SPLIT]; sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); -#if CONFIG_SUPERTX - sum_rate_nocoef += partition_cost[PARTITION_SPLIT]; -#endif // CONFIG_SUPERTX if (sum_rdc.rdcost < best_rdc.rdcost) { best_rdc = sum_rdc; -#if CONFIG_SUPERTX - best_rate_nocoef = sum_rate_nocoef; - assert(best_rate_nocoef >= 0); -#else temp_best_rdcost = best_rdc.rdcost; -#endif // CONFIG_SUPERTX pc_tree->partitioning = PARTITION_SPLIT; } } else if (cpi->sf.less_rectangular_check) { @@ -4013,19 +3048,11 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, av1_extract_interp_filter(ctx_none->mic.mbmi.interp_filters, 0); rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, -#if CONFIG_SUPERTX - &sum_rate_nocoef, -#endif // CONFIG_SUPERTX #if CONFIG_EXT_PARTITION_TYPES PARTITION_HORZ, #endif subsize, &pc_tree->horizontal[0], best_rdc.rdcost); -#if CONFIG_SUPERTX - abort_flag = - 
(sum_rdc.rdcost >= best_rd && (bsize > BLOCK_8X8 || unify_bsize)) || - (sum_rdc.rate == INT_MAX && bsize == BLOCK_8X8); -#endif if (sum_rdc.rdcost < temp_best_rdcost && !force_horz_split && (bsize > BLOCK_8X8 || unify_bsize)) { PICK_MODE_CONTEXT *ctx_h = &pc_tree->horizontal[0]; @@ -4040,21 +3067,12 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, pc_tree->horizontal[1].pred_interp_filter = av1_extract_interp_filter(ctx_h->mic.mbmi.interp_filters, 0); -#if CONFIG_SUPERTX - rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc, - &this_rate_nocoef, -#if CONFIG_EXT_PARTITION_TYPES - PARTITION_HORZ, -#endif - subsize, &pc_tree->horizontal[1], INT64_MAX); -#else rd_pick_sb_modes(cpi, tile_data, x, mi_row + mi_step, mi_col, &this_rdc, #if CONFIG_EXT_PARTITION_TYPES PARTITION_HORZ, #endif subsize, &pc_tree->horizontal[1], best_rdc.rdcost - sum_rdc.rdcost); -#endif // CONFIG_SUPERTX #if CONFIG_DIST_8X8 && CONFIG_CB4X4 if (x->using_dist_8x8 && this_rdc.rate != INT_MAX && bsize == BLOCK_8X8) { @@ -4067,16 +3085,10 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, if (this_rdc.rate == INT_MAX) { sum_rdc.rdcost = INT64_MAX; -#if CONFIG_SUPERTX - sum_rate_nocoef = INT_MAX; -#endif // CONFIG_SUPERTX } else { sum_rdc.rate += this_rdc.rate; sum_rdc.dist += this_rdc.dist; sum_rdc.rdcost += this_rdc.rdcost; -#if CONFIG_SUPERTX - sum_rate_nocoef += this_rate_nocoef; -#endif // CONFIG_SUPERTX } #if CONFIG_DIST_8X8 && CONFIG_CB4X4 if (x->using_dist_8x8 && sum_rdc.rdcost != INT64_MAX && @@ -4090,62 +3102,14 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, #endif // CONFIG_DIST_8X8 && CONFIG_CB4X4 } -#if CONFIG_SUPERTX - if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && !abort_flag) { - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - const PARTITION_TYPE best_partition = pc_tree->partitioning; - - pc_tree->partitioning = PARTITION_HORZ; - - sum_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]] - [supertx_size], - 0); - sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); - - if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) { - TX_TYPE best_tx = DCT_DCT; - RD_STATS tmp_rdc; - av1_init_rd_stats(&tmp_rdc); - tmp_rdc.rate = sum_rate_nocoef; - - restore_context(x, &x_ctx, mi_row, mi_col, bsize); - - rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate, - &tmp_rdc.dist, &best_tx, pc_tree); - - tmp_rdc.rate += av1_cost_bit( - cm->fc - ->supertx_prob[partition_supertx_context_lookup[PARTITION_HORZ]] - [supertx_size], - 1); - tmp_rdc.rdcost = RDCOST(x->rdmult, tmp_rdc.rate, tmp_rdc.dist); - if (tmp_rdc.rdcost < sum_rdc.rdcost) { - sum_rdc = tmp_rdc; - update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx, - supertx_size, pc_tree); - } - } - - pc_tree->partitioning = best_partition; - } -#endif // CONFIG_SUPERTX - #if CONFIG_CFL && CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG cfl_clear_sub8x8_val(xd->cfl); #endif // CONFIG_CFL && CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG if (sum_rdc.rdcost < best_rdc.rdcost) { sum_rdc.rate += partition_cost[PARTITION_HORZ]; sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); -#if CONFIG_SUPERTX - sum_rate_nocoef += partition_cost[PARTITION_HORZ]; -#endif // CONFIG_SUPERTX if (sum_rdc.rdcost < best_rdc.rdcost) { best_rdc = sum_rdc; -#if CONFIG_SUPERTX - best_rate_nocoef = sum_rate_nocoef; - assert(best_rate_nocoef >= 0); -#endif // CONFIG_SUPERTX pc_tree->partitioning = 
PARTITION_HORZ; } } @@ -4169,21 +3133,11 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, av1_extract_interp_filter(ctx_none->mic.mbmi.interp_filters, 0); rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col, &sum_rdc, -#if CONFIG_SUPERTX - &sum_rate_nocoef, -#endif // CONFIG_SUPERTX #if CONFIG_EXT_PARTITION_TYPES PARTITION_VERT, #endif subsize, &pc_tree->vertical[0], best_rdc.rdcost); -#if CONFIG_SUPERTX - abort_flag = - (sum_rdc.rdcost >= best_rd && (bsize > BLOCK_8X8 || unify_bsize)) || - (sum_rdc.rate == INT_MAX && bsize == BLOCK_8X8); - const int64_t vert_max_rdcost = INT64_MAX; -#else const int64_t vert_max_rdcost = best_rdc.rdcost; -#endif // CONFIG_SUPERTX if (sum_rdc.rdcost < vert_max_rdcost && !force_vert_split && (bsize > BLOCK_8X8 || unify_bsize)) { update_state(cpi, td, &pc_tree->vertical[0], mi_row, mi_col, subsize, 1); @@ -4197,22 +3151,12 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, pc_tree->vertical[1].pred_interp_filter = av1_extract_interp_filter(ctx_none->mic.mbmi.interp_filters, 0); -#if CONFIG_SUPERTX - rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc, - &this_rate_nocoef, -#if CONFIG_EXT_PARTITION_TYPES - PARTITION_VERT, -#endif - subsize, &pc_tree->vertical[1], - INT64_MAX - sum_rdc.rdcost); -#else rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + mi_step, &this_rdc, #if CONFIG_EXT_PARTITION_TYPES PARTITION_VERT, #endif subsize, &pc_tree->vertical[1], best_rdc.rdcost - sum_rdc.rdcost); -#endif // CONFIG_SUPERTX #if CONFIG_DIST_8X8 && CONFIG_CB4X4 if (x->using_dist_8x8 && this_rdc.rate != INT_MAX && bsize == BLOCK_8X8) { @@ -4225,16 +3169,10 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, if (this_rdc.rate == INT_MAX) { sum_rdc.rdcost = INT64_MAX; -#if CONFIG_SUPERTX - sum_rate_nocoef = INT_MAX; -#endif // CONFIG_SUPERTX } else { sum_rdc.rate += this_rdc.rate; sum_rdc.dist += this_rdc.dist; sum_rdc.rdcost += this_rdc.rdcost; -#if CONFIG_SUPERTX - sum_rate_nocoef += this_rate_nocoef; -#endif // CONFIG_SUPERTX } #if CONFIG_DIST_8X8 && CONFIG_CB4X4 if (x->using_dist_8x8 && sum_rdc.rdcost != INT64_MAX && @@ -4246,46 +3184,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, } #endif // CONFIG_DIST_8X8 && CONFIG_CB4X4 } -#if CONFIG_SUPERTX - if (supertx_allowed && sum_rdc.rdcost < INT64_MAX && !abort_flag) { - TX_SIZE supertx_size = max_txsize_lookup[bsize]; - const PARTITION_TYPE best_partition = pc_tree->partitioning; - - pc_tree->partitioning = PARTITION_VERT; - - sum_rdc.rate += av1_cost_bit( - cm->fc->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]] - [supertx_size], - 0); - sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); - - if (!check_intra_sb(cpi, tile_info, mi_row, mi_col, bsize, pc_tree)) { - TX_TYPE best_tx = DCT_DCT; - RD_STATS tmp_rdc; - av1_init_rd_stats(&tmp_rdc); - tmp_rdc.rate = sum_rate_nocoef; - - restore_context(x, &x_ctx, mi_row, mi_col, bsize); - - rd_supertx_sb(cpi, td, tile_info, mi_row, mi_col, bsize, &tmp_rdc.rate, - &tmp_rdc.dist, &best_tx, pc_tree); - - tmp_rdc.rate += av1_cost_bit( - cm->fc - ->supertx_prob[partition_supertx_context_lookup[PARTITION_VERT]] - [supertx_size], - 1); - tmp_rdc.rdcost = RDCOST(x->rdmult, tmp_rdc.rate, tmp_rdc.dist); - if (tmp_rdc.rdcost < sum_rdc.rdcost) { - sum_rdc = tmp_rdc; - update_supertx_param_sb(cpi, td, mi_row, mi_col, bsize, best_tx, - supertx_size, pc_tree); - } - } - - pc_tree->partitioning = best_partition; - } -#endif // CONFIG_SUPERTX #if 
CONFIG_CFL && CONFIG_CHROMA_SUB8X8 && CONFIG_DEBUG cfl_clear_sub8x8_val(xd->cfl); @@ -4294,15 +3192,8 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, if (sum_rdc.rdcost < best_rdc.rdcost) { sum_rdc.rate += partition_cost[PARTITION_VERT]; sum_rdc.rdcost = RDCOST(x->rdmult, sum_rdc.rate, sum_rdc.dist); -#if CONFIG_SUPERTX - sum_rate_nocoef += partition_cost[PARTITION_VERT]; -#endif // CONFIG_SUPERTX if (sum_rdc.rdcost < best_rdc.rdcost) { best_rdc = sum_rdc; -#if CONFIG_SUPERTX - best_rate_nocoef = sum_rate_nocoef; - assert(best_rate_nocoef >= 0); -#endif // CONFIG_SUPERTX pc_tree->partitioning = PARTITION_VERT; } } @@ -4349,23 +3240,17 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, #if CONFIG_EXT_PARTITION_TYPES_AB rd_test_partition3( cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->horizontala, - ctx_none, mi_row, mi_col, bsize, PARTITION_HORZ_A, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, get_subsize(bsize, PARTITION_HORZ_4), - mi_row + mi_step / 2, mi_col, get_subsize(bsize, PARTITION_HORZ_4), - mi_row + mi_step, mi_col, get_subsize(bsize, PARTITION_HORZ)); + ctx_none, mi_row, mi_col, bsize, PARTITION_HORZ_A, mi_row, mi_col, + get_subsize(bsize, PARTITION_HORZ_4), mi_row + mi_step / 2, mi_col, + get_subsize(bsize, PARTITION_HORZ_4), mi_row + mi_step, mi_col, + get_subsize(bsize, PARTITION_HORZ)); #else subsize = get_subsize(bsize, PARTITION_HORZ_A); rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->horizontala, ctx_none, mi_row, mi_col, bsize, - PARTITION_HORZ_A, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, bsize2, mi_row, mi_col + mi_step, bsize2, - mi_row + mi_step, mi_col, subsize); + PARTITION_HORZ_A, mi_row, mi_col, bsize2, mi_row, + mi_col + mi_step, bsize2, mi_row + mi_step, mi_col, + subsize); #endif #if !CONFIG_PVQ restore_context(x, &x_ctx, mi_row, mi_col, bsize); @@ -4378,23 +3263,17 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, #if CONFIG_EXT_PARTITION_TYPES_AB rd_test_partition3( cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->horizontalb, - ctx_none, mi_row, mi_col, bsize, PARTITION_HORZ_B, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, get_subsize(bsize, PARTITION_HORZ), mi_row + mi_step, - mi_col, get_subsize(bsize, PARTITION_HORZ_4), mi_row + 3 * mi_step / 2, - mi_col, get_subsize(bsize, PARTITION_HORZ_4)); + ctx_none, mi_row, mi_col, bsize, PARTITION_HORZ_B, mi_row, mi_col, + get_subsize(bsize, PARTITION_HORZ), mi_row + mi_step, mi_col, + get_subsize(bsize, PARTITION_HORZ_4), mi_row + 3 * mi_step / 2, mi_col, + get_subsize(bsize, PARTITION_HORZ_4)); #else subsize = get_subsize(bsize, PARTITION_HORZ_B); rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->horizontalb, ctx_none, mi_row, mi_col, bsize, - PARTITION_HORZ_B, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, subsize, mi_row + mi_step, mi_col, - bsize2, mi_row + mi_step, mi_col + mi_step, bsize2); + PARTITION_HORZ_B, mi_row, mi_col, subsize, + mi_row + mi_step, mi_col, bsize2, mi_row + mi_step, + mi_col + mi_step, bsize2); #endif #if !CONFIG_PVQ restore_context(x, &x_ctx, mi_row, mi_col, bsize); @@ -4407,23 +3286,17 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, #if CONFIG_EXT_PARTITION_TYPES_AB rd_test_partition3( cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->verticala, - ctx_none, mi_row, 
mi_col, bsize, PARTITION_VERT_A, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, get_subsize(bsize, PARTITION_VERT_4), mi_row, - mi_col + mi_step / 2, get_subsize(bsize, PARTITION_VERT_4), mi_row, - mi_col + mi_step, get_subsize(bsize, PARTITION_VERT)); + ctx_none, mi_row, mi_col, bsize, PARTITION_VERT_A, mi_row, mi_col, + get_subsize(bsize, PARTITION_VERT_4), mi_row, mi_col + mi_step / 2, + get_subsize(bsize, PARTITION_VERT_4), mi_row, mi_col + mi_step, + get_subsize(bsize, PARTITION_VERT)); #else subsize = get_subsize(bsize, PARTITION_VERT_A); rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->verticala, ctx_none, mi_row, mi_col, bsize, - PARTITION_VERT_A, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, bsize2, mi_row + mi_step, mi_col, bsize2, - mi_row, mi_col + mi_step, subsize); + PARTITION_VERT_A, mi_row, mi_col, bsize2, + mi_row + mi_step, mi_col, bsize2, mi_row, + mi_col + mi_step, subsize); #endif #if !CONFIG_PVQ restore_context(x, &x_ctx, mi_row, mi_col, bsize); @@ -4436,23 +3309,17 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, #if CONFIG_EXT_PARTITION_TYPES_AB rd_test_partition3( cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->verticalb, - ctx_none, mi_row, mi_col, bsize, PARTITION_VERT_B, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, get_subsize(bsize, PARTITION_VERT), mi_row, - mi_col + mi_step, get_subsize(bsize, PARTITION_VERT_4), mi_row, - mi_col + 3 * mi_step / 2, get_subsize(bsize, PARTITION_VERT_4)); + ctx_none, mi_row, mi_col, bsize, PARTITION_VERT_B, mi_row, mi_col, + get_subsize(bsize, PARTITION_VERT), mi_row, mi_col + mi_step, + get_subsize(bsize, PARTITION_VERT_4), mi_row, mi_col + 3 * mi_step / 2, + get_subsize(bsize, PARTITION_VERT_4)); #else subsize = get_subsize(bsize, PARTITION_VERT_B); rd_test_partition3(cpi, td, tile_data, tp, pc_tree, &best_rdc, pc_tree->verticalb, ctx_none, mi_row, mi_col, bsize, - PARTITION_VERT_B, -#if CONFIG_SUPERTX - best_rd, &best_rate_nocoef, &x_ctx, -#endif - mi_row, mi_col, subsize, mi_row, mi_col + mi_step, - bsize2, mi_row + mi_step, mi_col + mi_step, bsize2); + PARTITION_VERT_B, mi_row, mi_col, subsize, mi_row, + mi_col + mi_step, bsize2, mi_row + mi_step, + mi_col + mi_step, bsize2); #endif #if !CONFIG_PVQ restore_context(x, &x_ctx, mi_row, mi_col, bsize); @@ -4548,10 +3415,6 @@ static void rd_pick_partition(const AV1_COMP *const cpi, ThreadData *td, (void)best_rd; *rd_cost = best_rdc; -#if CONFIG_SUPERTX - *rate_nocoef = best_rate_nocoef; -#endif // CONFIG_SUPERTX - if (best_rdc.rate < INT_MAX && best_rdc.dist < INT64_MAX && pc_tree->index != 3) { if (bsize == cm->sb_size) { @@ -4632,9 +3495,6 @@ static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td, int dummy_rate; int64_t dummy_dist; RD_STATS dummy_rdc; -#if CONFIG_SUPERTX - int dummy_rate_nocoef; -#endif // CONFIG_SUPERTX int i; int seg_skip = 0; @@ -4739,22 +3599,14 @@ static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td, bsize = seg_skip ? 
cm->sb_size : sf->always_this_block_size; set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, cm->sb_size, - &dummy_rate, &dummy_dist, -#if CONFIG_SUPERTX - &dummy_rate_nocoef, -#endif // CONFIG_SUPERTX - 1, pc_root); + &dummy_rate, &dummy_dist, 1, pc_root); } else if (cpi->partition_search_skippable_frame) { BLOCK_SIZE bsize; set_offsets(cpi, tile_info, x, mi_row, mi_col, cm->sb_size); bsize = get_rd_var_based_fixed_partition(cpi, x, mi_row, mi_col); set_fixed_partitioning(cpi, tile_info, mi, mi_row, mi_col, bsize); rd_use_partition(cpi, td, tile_data, mi, tp, mi_row, mi_col, cm->sb_size, - &dummy_rate, &dummy_dist, -#if CONFIG_SUPERTX - &dummy_rate_nocoef, -#endif // CONFIG_SUPERTX - 1, pc_root); + &dummy_rate, &dummy_dist, 1, pc_root); } else { // If required set upper and lower partition size limits if (sf->auto_min_max_partition_size) { @@ -4763,11 +3615,7 @@ static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td, &x->min_partition_size, &x->max_partition_size); } rd_pick_partition(cpi, td, tile_data, tp, mi_row, mi_col, cm->sb_size, - &dummy_rdc, -#if CONFIG_SUPERTX - &dummy_rate_nocoef, -#endif // CONFIG_SUPERTX - INT64_MAX, pc_root); + &dummy_rdc, INT64_MAX, pc_root); } #if CONFIG_LPF_SB if (USE_LOOP_FILTER_SUPERBLOCK) { @@ -5760,23 +4608,12 @@ void av1_encode_frame(AV1_COMP *cpi) { #endif // CONFIG_EXT_TX && CONFIG_RECT_TX if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && count32x32_lp == 0 && count32x32_32x32p == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_16X16] == 0 && - cm->counts.supertx_size[TX_32X32] == 0 && - cm->counts.supertx_size[TX_64X64] == 0 && -#endif count64x64_64x64p == 0) { cm->tx_mode = ALLOW_8X8; reset_skip_tx_size(cm, TX_8X8); } else if (count8x8_8x8p == 0 && count8x8_lp == 0 && count16x16_16x16p == 0 && count16x16_lp == 0 && count32x32_32x32p == 0 && count32x32_lp == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_8X8] == 0 && - cm->counts.supertx_size[TX_16X16] == 0 && - cm->counts.supertx_size[TX_32X32] == 0 && - cm->counts.supertx_size[TX_64X64] == 0 && -#endif count64x64_64x64p == 0) { cm->tx_mode = ONLY_4X4; reset_skip_tx_size(cm, TX_4X4); @@ -5784,19 +4621,11 @@ void av1_encode_frame(AV1_COMP *cpi) { count32x32_lp == 0) { cm->tx_mode = ALLOW_64X64; } else if (count4x4 == 0 && count8x8_lp == 0 && count16x16_lp == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_64X64] == 0 && -#endif count64x64_64x64p == 0) { cm->tx_mode = ALLOW_32X32; reset_skip_tx_size(cm, TX_32X32); } else if (count4x4 == 0 && count8x8_lp == 0 && count32x32_lp == 0 && - count32x32_32x32p == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_32X32] == 0 && - cm->counts.supertx_size[TX_64X64] == 0 && -#endif - count64x64_64x64p == 0) { + count32x32_32x32p == 0 && count64x64_64x64p == 0) { cm->tx_mode = ALLOW_16X16; reset_skip_tx_size(cm, TX_16X16); } @@ -5841,30 +4670,16 @@ void av1_encode_frame(AV1_COMP *cpi) { count32x32 += counts->tx_size_implied[TX_32X32][TX_32X32]; #endif // CONFIG_EXT_TX && CONFIG_RECT_TX if (count4x4 == 0 && count16x16_lp == 0 && count16x16_16x16p == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_16X16] == 0 && - cm->counts.supertx_size[TX_32X32] == 0 && -#endif // CONFIG_SUPERTX count32x32 == 0) { cm->tx_mode = ALLOW_8X8; reset_skip_tx_size(cm, TX_8X8); } else if (count8x8_8x8p == 0 && count16x16_16x16p == 0 && - count8x8_lp == 0 && count16x16_lp == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_8X8] == 0 && - 
cm->counts.supertx_size[TX_16X16] == 0 && - cm->counts.supertx_size[TX_32X32] == 0 && -#endif // CONFIG_SUPERTX - count32x32 == 0) { + count8x8_lp == 0 && count16x16_lp == 0 && count32x32 == 0) { cm->tx_mode = ONLY_4X4; reset_skip_tx_size(cm, TX_4X4); } else if (count8x8_lp == 0 && count16x16_lp == 0 && count4x4 == 0) { cm->tx_mode = ALLOW_32X32; - } else if (count32x32 == 0 && count8x8_lp == 0 && -#if CONFIG_SUPERTX - cm->counts.supertx_size[TX_32X32] == 0 && -#endif // CONFIG_SUPERTX - count4x4 == 0) { + } else if (count32x32 == 0 && count8x8_lp == 0 && count4x4 == 0) { cm->tx_mode = ALLOW_16X16; reset_skip_tx_size(cm, TX_16X16); } @@ -6528,1112 +5343,3 @@ static void encode_superblock(const AV1_COMP *const cpi, ThreadData *td, } #endif // CONFIG_CFL && CONFIG_CHROMA_SUB8X8 } - -#if CONFIG_SUPERTX -static int check_intra_b(PICK_MODE_CONTEXT *ctx) { - if (!is_inter_mode((&ctx->mic)->mbmi.mode)) return 1; - if (ctx->mic.mbmi.ref_frame[1] == INTRA_FRAME) return 1; - return 0; -} - -static int check_intra_sb(const AV1_COMP *const cpi, const TileInfo *const tile, - int mi_row, int mi_col, BLOCK_SIZE bsize, - PC_TREE *pc_tree) { - const AV1_COMMON *const cm = &cpi->common; - const int hbs = mi_size_wide[bsize] / 2; - const PARTITION_TYPE partition = pc_tree->partitioning; - const BLOCK_SIZE subsize = get_subsize(bsize, partition); -#if CONFIG_EXT_PARTITION_TYPES - int i; -#endif -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; -#endif - -#if !CONFIG_CB4X4 - assert(bsize >= BLOCK_8X8); -#endif - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return 1; - - switch (partition) { - case PARTITION_NONE: return check_intra_b(&pc_tree->none); break; - case PARTITION_VERT: - if (check_intra_b(&pc_tree->vertical[0])) return 1; - if (mi_col + hbs < cm->mi_cols && (bsize > BLOCK_8X8 || unify_bsize)) { - if (check_intra_b(&pc_tree->vertical[1])) return 1; - } - break; - case PARTITION_HORZ: - if (check_intra_b(&pc_tree->horizontal[0])) return 1; - if (mi_row + hbs < cm->mi_rows && (bsize > BLOCK_8X8 || unify_bsize)) { - if (check_intra_b(&pc_tree->horizontal[1])) return 1; - } - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - if (check_intra_b(pc_tree->leaf_split[0])) return 1; - } else { - if (check_intra_sb(cpi, tile, mi_row, mi_col, subsize, - pc_tree->split[0])) - return 1; - if (check_intra_sb(cpi, tile, mi_row, mi_col + hbs, subsize, - pc_tree->split[1])) - return 1; - if (check_intra_sb(cpi, tile, mi_row + hbs, mi_col, subsize, - pc_tree->split[2])) - return 1; - if (check_intra_sb(cpi, tile, mi_row + hbs, mi_col + hbs, subsize, - pc_tree->split[3])) - return 1; - } - break; -#if CONFIG_EXT_PARTITION_TYPES -#if CONFIG_EXT_PARTITION_TYPES_AB -#error HORZ/VERT_A/B partitions not yet updated in superres code -#endif - case PARTITION_HORZ_A: - for (i = 0; i < 3; i++) { - if (check_intra_b(&pc_tree->horizontala[i])) return 1; - } - break; - case PARTITION_HORZ_B: - for (i = 0; i < 3; i++) { - if (check_intra_b(&pc_tree->horizontalb[i])) return 1; - } - break; - case PARTITION_VERT_A: - for (i = 0; i < 3; i++) { - if (check_intra_b(&pc_tree->verticala[i])) return 1; - } - break; - case PARTITION_VERT_B: - for (i = 0; i < 3; i++) { - if (check_intra_b(&pc_tree->verticalb[i])) return 1; - } - break; -#endif // CONFIG_EXT_PARTITION_TYPES - default: assert(0); - } - return 0; -} - -static int check_supertx_b(TX_SIZE supertx_size, PICK_MODE_CONTEXT *ctx) { - return ctx->mic.mbmi.tx_size == supertx_size; -} - -static int 
check_supertx_sb(BLOCK_SIZE bsize, TX_SIZE supertx_size, - PC_TREE *pc_tree) { - PARTITION_TYPE partition; - BLOCK_SIZE subsize; -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; -#endif - - partition = pc_tree->partitioning; - subsize = get_subsize(bsize, partition); - switch (partition) { - case PARTITION_NONE: return check_supertx_b(supertx_size, &pc_tree->none); - case PARTITION_VERT: - return check_supertx_b(supertx_size, &pc_tree->vertical[0]); - case PARTITION_HORZ: - return check_supertx_b(supertx_size, &pc_tree->horizontal[0]); - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8 && !unify_bsize) - return check_supertx_b(supertx_size, pc_tree->leaf_split[0]); - else - return check_supertx_sb(subsize, supertx_size, pc_tree->split[0]); -#if CONFIG_EXT_PARTITION_TYPES -#if CONFIG_EXT_PARTITION_TYPES_AB -#error HORZ/VERT_A/B partitions not yet updated in superres code -#endif - case PARTITION_HORZ_A: - return check_supertx_b(supertx_size, &pc_tree->horizontala[0]); - case PARTITION_HORZ_B: - return check_supertx_b(supertx_size, &pc_tree->horizontalb[0]); - case PARTITION_VERT_A: - return check_supertx_b(supertx_size, &pc_tree->verticala[0]); - case PARTITION_VERT_B: - return check_supertx_b(supertx_size, &pc_tree->verticalb[0]); -#endif // CONFIG_EXT_PARTITION_TYPES - default: assert(0); return 0; - } -} - -static void predict_superblock(const AV1_COMP *const cpi, ThreadData *td, - int mi_row_ori, int mi_col_ori, int mi_row_pred, - int mi_col_pred, int plane, - BLOCK_SIZE bsize_pred, int b_sub8x8, int block) { - // Used in supertx - // (mi_row_ori, mi_col_ori): location for mv - // (mi_row_pred, mi_col_pred, bsize_pred): region to predict - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - MODE_INFO *mi_8x8 = xd->mi[0]; - MODE_INFO *mi = mi_8x8; - MB_MODE_INFO *mbmi = &mi->mbmi; - int ref; - const int is_compound = has_second_ref(mbmi); - - set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); - - for (ref = 0; ref < 1 + is_compound; ++ref) { - YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]); - av1_setup_pre_planes(xd, ref, cfg, mi_row_pred, mi_col_pred, - &xd->block_refs[ref]->sf); - } - -#if CONFIG_COMPOUND_SINGLEREF - // Single ref compound mode - if (!is_compound && is_inter_singleref_comp_mode(mbmi->mode)) { - xd->block_refs[1] = xd->block_refs[0]; - YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[0]); - av1_setup_pre_planes(xd, 1, cfg, mi_row_pred, mi_col_pred, - &xd->block_refs[1]->sf); - } -#endif // CONFIG_COMPOUND_SINGLEREF - - if (!b_sub8x8) - av1_build_inter_predictor_sb_extend(cm, xd, mi_row_ori, mi_col_ori, - mi_row_pred, mi_col_pred, plane, - bsize_pred); - else - av1_build_inter_predictor_sb_sub8x8_extend(cm, xd, mi_row_ori, mi_col_ori, - mi_row_pred, mi_col_pred, plane, - bsize_pred, block); -} - -static void predict_b_extend(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int block, - int mi_row_ori, int mi_col_ori, int mi_row_pred, - int mi_col_pred, int mi_row_top, int mi_col_top, - int plane, uint8_t *dst_buf, int dst_stride, - BLOCK_SIZE bsize_top, BLOCK_SIZE bsize_pred, - RUN_TYPE dry_run, int b_sub8x8) { - // Used in supertx - // (mi_row_ori, mi_col_ori): location for mv - // (mi_row_pred, mi_col_pred, bsize_pred): region to predict - // (mi_row_top, mi_col_top, bsize_top): region of the top partition size - // block: sub location of sub8x8 blocks - // b_sub8x8: 1: ori is sub8x8; 0: ori is 
not sub8x8 - // bextend: 1: region to predict is an extension of ori; 0: not - - MACROBLOCK *const x = &td->mb; - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &x->e_mbd; - int r = (mi_row_pred - mi_row_top) * MI_SIZE; - int c = (mi_col_pred - mi_col_top) * MI_SIZE; - const int mi_width_top = mi_size_wide[bsize_top]; - const int mi_height_top = mi_size_high[bsize_top]; - - if (mi_row_pred < mi_row_top || mi_col_pred < mi_col_top || - mi_row_pred >= mi_row_top + mi_height_top || - mi_col_pred >= mi_col_top + mi_width_top || mi_row_pred >= cm->mi_rows || - mi_col_pred >= cm->mi_cols) - return; - - set_offsets_extend(cpi, td, tile, mi_row_pred, mi_col_pred, mi_row_ori, - mi_col_ori, bsize_pred); - xd->plane[plane].dst.stride = dst_stride; - xd->plane[plane].dst.buf = - dst_buf + (r >> xd->plane[plane].subsampling_y) * dst_stride + - (c >> xd->plane[plane].subsampling_x); - - predict_superblock(cpi, td, mi_row_ori, mi_col_ori, mi_row_pred, mi_col_pred, - plane, bsize_pred, b_sub8x8, block); - - if (!dry_run && (plane == 0) && (block == 0 || !b_sub8x8)) - update_stats(&cpi->common, td, mi_row_pred, mi_col_pred, 1); -} - -static void extend_dir(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int block, BLOCK_SIZE bsize, - BLOCK_SIZE top_bsize, int mi_row_ori, int mi_col_ori, - int mi_row, int mi_col, int mi_row_top, int mi_col_top, - int plane, uint8_t *dst_buf, int dst_stride, int dir) { - // dir: 0-lower, 1-upper, 2-left, 3-right - // 4-lowerleft, 5-upperleft, 6-lowerright, 7-upperright - MACROBLOCKD *xd = &td->mb.e_mbd; - const int mi_width = mi_size_wide[bsize]; - const int mi_height = mi_size_high[bsize]; - int xss = xd->plane[1].subsampling_x; - int yss = xd->plane[1].subsampling_y; -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; -#endif - int b_sub8x8 = (bsize < BLOCK_8X8) && !unify_bsize ? 1 : 0; - int wide_unit, high_unit; - int i, j; - int ext_offset = 0; - - BLOCK_SIZE extend_bsize; - int mi_row_pred, mi_col_pred; - - if (dir == 0 || dir == 1) { // lower and upper - extend_bsize = - (mi_width == mi_size_wide[BLOCK_8X8] || bsize < BLOCK_8X8 || xss < yss) - ? BLOCK_8X8 - : BLOCK_16X8; - -#if CONFIG_CB4X4 - if (bsize < BLOCK_8X8) { - extend_bsize = BLOCK_4X4; - ext_offset = mi_size_wide[BLOCK_8X8]; - } -#endif - wide_unit = mi_size_wide[extend_bsize]; - high_unit = mi_size_high[extend_bsize]; - - mi_row_pred = mi_row + ((dir == 0) ? mi_height : -(mi_height + ext_offset)); - mi_col_pred = mi_col; - - for (j = 0; j < mi_height + ext_offset; j += high_unit) - for (i = 0; i < mi_width + ext_offset; i += wide_unit) - predict_b_extend(cpi, td, tile, block, mi_row_ori, mi_col_ori, - mi_row_pred + j, mi_col_pred + i, mi_row_top, - mi_col_top, plane, dst_buf, dst_stride, top_bsize, - extend_bsize, 1, b_sub8x8); - } else if (dir == 2 || dir == 3) { // left and right - extend_bsize = - (mi_height == mi_size_high[BLOCK_8X8] || bsize < BLOCK_8X8 || yss < xss) - ? BLOCK_8X8 - : BLOCK_8X16; -#if CONFIG_CB4X4 - if (bsize < BLOCK_8X8) { - extend_bsize = BLOCK_4X4; - ext_offset = mi_size_wide[BLOCK_8X8]; - } -#endif - wide_unit = mi_size_wide[extend_bsize]; - high_unit = mi_size_high[extend_bsize]; - - mi_row_pred = mi_row; - mi_col_pred = mi_col + ((dir == 3) ? 
mi_width : -(mi_width + ext_offset)); - - for (j = 0; j < mi_height + ext_offset; j += high_unit) - for (i = 0; i < mi_width + ext_offset; i += wide_unit) - predict_b_extend(cpi, td, tile, block, mi_row_ori, mi_col_ori, - mi_row_pred + j, mi_col_pred + i, mi_row_top, - mi_col_top, plane, dst_buf, dst_stride, top_bsize, - extend_bsize, 1, b_sub8x8); - } else { - extend_bsize = BLOCK_8X8; -#if CONFIG_CB4X4 - if (bsize < BLOCK_8X8) { - extend_bsize = BLOCK_4X4; - ext_offset = mi_size_wide[BLOCK_8X8]; - } -#endif - wide_unit = mi_size_wide[extend_bsize]; - high_unit = mi_size_high[extend_bsize]; - - mi_row_pred = mi_row + ((dir == 4 || dir == 6) ? mi_height - : -(mi_height + ext_offset)); - mi_col_pred = - mi_col + ((dir == 6 || dir == 7) ? mi_width : -(mi_width + ext_offset)); - - for (j = 0; j < mi_height + ext_offset; j += high_unit) - for (i = 0; i < mi_width + ext_offset; i += wide_unit) - predict_b_extend(cpi, td, tile, block, mi_row_ori, mi_col_ori, - mi_row_pred + j, mi_col_pred + i, mi_row_top, - mi_col_top, plane, dst_buf, dst_stride, top_bsize, - extend_bsize, 1, b_sub8x8); - } -} - -static void extend_all(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int block, BLOCK_SIZE bsize, - BLOCK_SIZE top_bsize, int mi_row_ori, int mi_col_ori, - int mi_row, int mi_col, int mi_row_top, int mi_col_top, - int plane, uint8_t *dst_buf, int dst_stride) { - assert(block >= 0 && block < 4); - for (int i = 0; i < 8; ++i) { - extend_dir(cpi, td, tile, block, bsize, top_bsize, mi_row_ori, mi_col_ori, - mi_row, mi_col, mi_row_top, mi_col_top, plane, dst_buf, - dst_stride, i); - } -} - -// This function generates prediction for multiple blocks, between which -// discontinuity around boundary is reduced by smoothing masks. The basic -// smoothing mask is a soft step function along horz/vert direction. In more -// complicated case when a block is split into 4 subblocks, the basic mask is -// first applied to neighboring subblocks (2 pairs) in horizontal direction and -// then applied to the 2 masked prediction mentioned above in vertical direction -// If the block is split into more than one level, at every stage, masked -// prediction is stored in dst_buf[] passed from higher level. -static void predict_sb_complex(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, - int mi_col, int mi_row_top, int mi_col_top, - RUN_TYPE dry_run, BLOCK_SIZE bsize, - BLOCK_SIZE top_bsize, uint8_t *dst_buf[3], - int dst_stride[3], PC_TREE *pc_tree) { - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - const int hbs = mi_size_wide[bsize] / 2; - const int is_partition_root = bsize >= BLOCK_8X8; - const int ctx = is_partition_root - ? 
partition_plane_context(xd, mi_row, mi_col, -#if CONFIG_UNPOISON_PARTITION_CTX - mi_row + hbs < cm->mi_rows, - mi_col + hbs < cm->mi_cols, -#endif - bsize) - : -1; - const PARTITION_TYPE partition = pc_tree->partitioning; - const BLOCK_SIZE subsize = get_subsize(bsize, partition); -#if CONFIG_EXT_PARTITION_TYPES - const BLOCK_SIZE bsize2 = get_subsize(bsize, PARTITION_SPLIT); -#endif - - int i; - uint8_t *dst_buf1[3], *dst_buf2[3], *dst_buf3[3]; - DECLARE_ALIGNED(16, uint8_t, tmp_buf1[MAX_MB_PLANE * MAX_TX_SQUARE * 2]); - DECLARE_ALIGNED(16, uint8_t, tmp_buf2[MAX_MB_PLANE * MAX_TX_SQUARE * 2]); - DECLARE_ALIGNED(16, uint8_t, tmp_buf3[MAX_MB_PLANE * MAX_TX_SQUARE * 2]); - int dst_stride1[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE }; - int dst_stride2[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE }; - int dst_stride3[3] = { MAX_TX_SIZE, MAX_TX_SIZE, MAX_TX_SIZE }; -#if CONFIG_CB4X4 - const int unify_bsize = 1; -#else - const int unify_bsize = 0; - assert(bsize >= BLOCK_8X8); -#endif - - if (mi_row >= cm->mi_rows || mi_col >= cm->mi_cols) return; - -#if CONFIG_HIGHBITDEPTH - if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) { - int len = sizeof(uint16_t); - dst_buf1[0] = CONVERT_TO_BYTEPTR(tmp_buf1); - dst_buf1[1] = CONVERT_TO_BYTEPTR(tmp_buf1 + MAX_TX_SQUARE * len); - dst_buf1[2] = CONVERT_TO_BYTEPTR(tmp_buf1 + 2 * MAX_TX_SQUARE * len); - dst_buf2[0] = CONVERT_TO_BYTEPTR(tmp_buf2); - dst_buf2[1] = CONVERT_TO_BYTEPTR(tmp_buf2 + MAX_TX_SQUARE * len); - dst_buf2[2] = CONVERT_TO_BYTEPTR(tmp_buf2 + 2 * MAX_TX_SQUARE * len); - dst_buf3[0] = CONVERT_TO_BYTEPTR(tmp_buf3); - dst_buf3[1] = CONVERT_TO_BYTEPTR(tmp_buf3 + MAX_TX_SQUARE * len); - dst_buf3[2] = CONVERT_TO_BYTEPTR(tmp_buf3 + 2 * MAX_TX_SQUARE * len); - } else { -#endif // CONFIG_HIGHBITDEPTH - dst_buf1[0] = tmp_buf1; - dst_buf1[1] = tmp_buf1 + MAX_TX_SQUARE; - dst_buf1[2] = tmp_buf1 + 2 * MAX_TX_SQUARE; - dst_buf2[0] = tmp_buf2; - dst_buf2[1] = tmp_buf2 + MAX_TX_SQUARE; - dst_buf2[2] = tmp_buf2 + 2 * MAX_TX_SQUARE; - dst_buf3[0] = tmp_buf3; - dst_buf3[1] = tmp_buf3 + MAX_TX_SQUARE; - dst_buf3[2] = tmp_buf3 + 2 * MAX_TX_SQUARE; -#if CONFIG_HIGHBITDEPTH - } -#endif // CONFIG_HIGHBITDEPTH - - if (!dry_run && ctx >= 0 && bsize < top_bsize) { - // Explicitly cast away const. 
- FRAME_COUNTS *const frame_counts = (FRAME_COUNTS *)&cm->counts; - frame_counts->partition[ctx][partition]++; - } - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - } - - switch (partition) { - case PARTITION_NONE: - assert(bsize < top_bsize); - for (i = 0; i < MAX_MB_PLANE; ++i) { - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], dst_stride[i], - top_bsize, bsize, dry_run, 0); - extend_all(cpi, td, tile, 0, bsize, top_bsize, mi_row, mi_col, mi_row, - mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - } - break; - case PARTITION_HORZ: - if (bsize == BLOCK_8X8 && !unify_bsize) { - for (i = 0; i < MAX_MB_PLANE; ++i) { - // First half - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], dst_stride[i], - top_bsize, BLOCK_8X8, dry_run, 1); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - - // Second half - predict_b_extend(cpi, td, tile, 2, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], top_bsize, BLOCK_8X8, dry_run, 1); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 2, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i]); - } - - // Smooth - xd->plane[0].dst.buf = dst_buf[0]; - xd->plane[0].dst.stride = dst_stride[0]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - 0); - } else { - for (i = 0; i < MAX_MB_PLANE; ++i) { -#if CONFIG_CB4X4 - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - - if (handle_chroma_sub8x8) { - int mode_offset_row = CONFIG_CHROMA_SUB8X8 ? 
hbs : 0; - - predict_b_extend(cpi, td, tile, 0, mi_row + mode_offset_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i], top_bsize, bsize, - dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, bsize, top_bsize, - mi_row + mode_offset_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], dst_stride[i]); - } else { -#endif - // First half - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, subsize, dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], 0); - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - - if (mi_row + hbs < cm->mi_rows) { - // Second half - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, - mi_row + hbs, mi_col, mi_row_top, mi_col_top, i, - dst_buf1[i], dst_stride1[i], top_bsize, subsize, - dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, - mi_col, mi_row + hbs, mi_col, mi_row_top, mi_col_top, - i, dst_buf1[i], dst_stride1[i]); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, - mi_col, mi_row + hbs, mi_col, mi_row_top, mi_col_top, - i, dst_buf1[i], dst_stride1[i], 1); - // Smooth - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } -#if CONFIG_CB4X4 - } -#endif - } - } - break; - case PARTITION_VERT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - for (i = 0; i < MAX_MB_PLANE; ++i) { - // First half - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], dst_stride[i], - top_bsize, BLOCK_8X8, dry_run, 1); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - - // Second half - predict_b_extend(cpi, td, tile, 1, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], top_bsize, BLOCK_8X8, dry_run, 1); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 1, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i]); - } - - // Smooth - xd->plane[0].dst.buf = dst_buf[0]; - xd->plane[0].dst.stride = dst_stride[0]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[0], dst_stride[0], dst_buf1[0], dst_stride1[0], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - 0); - } else { - for (i = 0; i < MAX_MB_PLANE; ++i) { -#if CONFIG_CB4X4 - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - - if (handle_chroma_sub8x8) { - int mode_offset_col = CONFIG_CHROMA_SUB8X8 ? 
hbs : 0; - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + mode_offset_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, - dst_buf[i], dst_stride[i], top_bsize, bsize, - dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, bsize, top_bsize, mi_row, - mi_col + mode_offset_col, mi_row, mi_col, mi_row_top, - mi_col_top, i, dst_buf[i], dst_stride[i]); - } else { -#endif - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, subsize, dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], 3); - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - - if (mi_col + hbs < cm->mi_cols) { - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, i, - dst_buf1[i], dst_stride1[i], top_bsize, subsize, - dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf1[i], dst_stride1[i]); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf1[i], dst_stride1[i], 2); - - // smooth - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - } -#if CONFIG_CB4X4 - } -#endif - } - } - break; - case PARTITION_SPLIT: - if (bsize == BLOCK_8X8 && !unify_bsize) { - for (i = 0; i < MAX_MB_PLANE; i++) { - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], dst_stride[i], - top_bsize, BLOCK_8X8, dry_run, 1); - predict_b_extend(cpi, td, tile, 1, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i], top_bsize, BLOCK_8X8, dry_run, 1); - predict_b_extend(cpi, td, tile, 2, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf2[i], - dst_stride2[i], top_bsize, BLOCK_8X8, dry_run, 1); - predict_b_extend(cpi, td, tile, 3, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf3[i], - dst_stride3[i], top_bsize, BLOCK_8X8, dry_run, 1); - - if (bsize < top_bsize) { - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - extend_all(cpi, td, tile, 1, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf1[i], - dst_stride1[i]); - extend_all(cpi, td, tile, 2, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf2[i], - dst_stride2[i]); - extend_all(cpi, td, tile, 3, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf3[i], - dst_stride3[i]); - } - } -#if CONFIG_CB4X4 - } else if (bsize == BLOCK_8X8) { - for (i = 0; i < MAX_MB_PLANE; i++) { - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - - if (handle_chroma_sub8x8) { - int mode_offset_row = - CONFIG_CHROMA_SUB8X8 && 
mi_row + hbs < cm->mi_rows ? hbs : 0; - int mode_offset_col = - CONFIG_CHROMA_SUB8X8 && mi_col + hbs < cm->mi_cols ? hbs : 0; - - predict_b_extend(cpi, td, tile, 0, mi_row + mode_offset_row, - mi_col + mode_offset_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, BLOCK_8X8, dry_run, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, BLOCK_8X8, top_bsize, - mi_row + mode_offset_row, mi_col + mode_offset_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - } else { - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i], top_bsize, subsize, dry_run, 0); - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, i, - dst_buf1[i], dst_stride1[i], top_bsize, subsize, - dry_run, 0); - if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, - mi_row + hbs, mi_col, mi_row_top, mi_col_top, i, - dst_buf2[i], dst_stride2[i], top_bsize, subsize, - dry_run, 0); - if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col + hbs, - mi_row + hbs, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf3[i], dst_stride3[i], - top_bsize, subsize, dry_run, 0); - - if (bsize < top_bsize) { - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row, mi_col, mi_row_top, mi_col_top, i, dst_buf[i], - dst_stride[i]); - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, - mi_col + hbs, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf1[i], dst_stride1[i]); - if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, - mi_col, mi_row + hbs, mi_col, mi_row_top, mi_col_top, - i, dst_buf2[i], dst_stride2[i]); - if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, - mi_col + hbs, mi_row + hbs, mi_col + hbs, mi_row_top, - mi_col_top, i, dst_buf3[i], dst_stride3[i]); - } - } - } -#endif - } else { - predict_sb_complex(cpi, td, tile, mi_row, mi_col, mi_row_top, - mi_col_top, dry_run, subsize, top_bsize, dst_buf, - dst_stride, pc_tree->split[0]); - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) - predict_sb_complex(cpi, td, tile, mi_row, mi_col + hbs, mi_row_top, - mi_col_top, dry_run, subsize, top_bsize, dst_buf1, - dst_stride1, pc_tree->split[1]); - if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) - predict_sb_complex(cpi, td, tile, mi_row + hbs, mi_col, mi_row_top, - mi_col_top, dry_run, subsize, top_bsize, dst_buf2, - dst_stride2, pc_tree->split[2]); - if (mi_row + hbs < cm->mi_rows && mi_col + hbs < cm->mi_cols) - predict_sb_complex(cpi, td, tile, mi_row + hbs, mi_col + hbs, - mi_row_top, mi_col_top, dry_run, subsize, - top_bsize, dst_buf3, dst_stride3, - pc_tree->split[3]); - } - for (i = 0; i < MAX_MB_PLANE; i++) { -#if CONFIG_CB4X4 - const struct macroblockd_plane *pd = &xd->plane[i]; - int handle_chroma_sub8x8 = need_handle_chroma_sub8x8( - subsize, pd->subsampling_x, pd->subsampling_y); - if (handle_chroma_sub8x8) continue; // Skip <4x4 chroma smoothing -#else - if (bsize == BLOCK_8X8 && i != 0) - continue; // Skip <4x4 chroma smoothing -#endif - - if (mi_row < cm->mi_rows && mi_col + hbs < cm->mi_cols) { - 
av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - if (mi_row + hbs < cm->mi_rows) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf2[i], dst_stride2[i], dst_buf3[i], dst_stride3[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } - } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } - } - break; -#if CONFIG_EXT_PARTITION_TYPES -#if CONFIG_EXT_PARTITION_TYPES_AB -#error HORZ/VERT_A/B partitions not yet updated in superres code -#endif - case PARTITION_HORZ_A: - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize, - bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf, dst_stride); - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf1, - dst_stride1, top_bsize, bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, dry_run, dst_buf1, dst_stride1); - - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf2, dst_stride2, - top_bsize, subsize, dry_run, 0, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf2, dst_stride2); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf2, dst_stride2, 1); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - i); - } - - break; - case PARTITION_VERT_A: - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize, - bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf, dst_stride); - - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf1, dst_stride1, - top_bsize, bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf1, dst_stride1); - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf2, - dst_stride2, top_bsize, subsize, dry_run, 0, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, dry_run, 
dst_buf2, dst_stride2); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, dry_run, dst_buf2, dst_stride2, 2); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf2[i], dst_stride2[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - i); - } - break; - case PARTITION_HORZ_B: - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize, - subsize, dry_run, 0, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf, dst_stride); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf, dst_stride, 0); - - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col, mi_row + hbs, - mi_col, mi_row_top, mi_col_top, dst_buf1, dst_stride1, - top_bsize, bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf1, dst_stride1); - - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col + hbs, - mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top, - dst_buf2, dst_stride2, top_bsize, bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs, - mi_col + hbs, mi_row_top, mi_col_top, dry_run, dst_buf2, - dst_stride2); - - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf1[i]; - xd->plane[i].dst.stride = dst_stride1[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_VERT, i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_HORZ, - i); - } - break; - case PARTITION_VERT_B: - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col, mi_row, mi_col, - mi_row_top, mi_col_top, dst_buf, dst_stride, top_bsize, - subsize, dry_run, 0, 0); - if (bsize < top_bsize) - extend_all(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf, dst_stride); - else - extend_dir(cpi, td, tile, 0, subsize, top_bsize, mi_row, mi_col, - mi_row_top, mi_col_top, dry_run, dst_buf, dst_stride, 3); - - predict_b_extend(cpi, td, tile, 0, mi_row, mi_col + hbs, mi_row, - mi_col + hbs, mi_row_top, mi_col_top, dst_buf1, - dst_stride1, top_bsize, bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row, mi_col + hbs, - mi_row_top, mi_col_top, dry_run, dst_buf1, dst_stride1); - - predict_b_extend(cpi, td, tile, 0, mi_row + hbs, mi_col + hbs, - mi_row + hbs, mi_col + hbs, mi_row_top, mi_col_top, - dst_buf2, dst_stride2, top_bsize, bsize2, dry_run, 0, 0); - extend_all(cpi, td, tile, 0, bsize2, top_bsize, mi_row + hbs, - mi_col + hbs, mi_row_top, mi_col_top, dry_run, dst_buf2, - dst_stride2); - - for (i = 0; i < 
MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf1[i]; - xd->plane[i].dst.stride = dst_stride1[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf1[i], dst_stride1[i], dst_buf2[i], dst_stride2[i], - mi_row, mi_col, mi_row_top, mi_col_top, bsize, top_bsize, - PARTITION_HORZ, i); - } - for (i = 0; i < MAX_MB_PLANE; i++) { - xd->plane[i].dst.buf = dst_buf[i]; - xd->plane[i].dst.stride = dst_stride[i]; - av1_build_masked_inter_predictor_complex( - xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i], mi_row, - mi_col, mi_row_top, mi_col_top, bsize, top_bsize, PARTITION_VERT, - i); - } - break; -#endif // CONFIG_EXT_PARTITION_TYPES - default: assert(0); - } - -#if CONFIG_EXT_PARTITION_TYPES - if (bsize < top_bsize) - update_ext_partition_context(xd, mi_row, mi_col, subsize, bsize, partition); -#else - if (bsize < top_bsize && (partition != PARTITION_SPLIT || bsize == BLOCK_8X8)) - update_partition_context(xd, mi_row, mi_col, subsize, bsize); -#endif // CONFIG_EXT_PARTITION_TYPES -} - -static void rd_supertx_sb(const AV1_COMP *const cpi, ThreadData *td, - const TileInfo *const tile, int mi_row, int mi_col, - BLOCK_SIZE bsize, int *tmp_rate, int64_t *tmp_dist, - TX_TYPE *best_tx, PC_TREE *pc_tree) { - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCK *const x = &td->mb; - MACROBLOCKD *const xd = &x->e_mbd; - int plane, pnskip, skippable, skippable_uv, rate_uv, this_rate, - base_rate = *tmp_rate; - int64_t sse, pnsse, sse_uv, this_dist, dist_uv; - uint8_t *dst_buf[3]; - int dst_stride[3]; - TX_SIZE tx_size; - MB_MODE_INFO *mbmi; - TX_TYPE tx_type, best_tx_nostx; - int tmp_rate_tx = 0, skip_tx = 0; - int64_t tmp_dist_tx = 0, rd_tx, bestrd_tx = INT64_MAX; - - set_skip_context(xd, mi_row, mi_col); - set_mode_info_offsets(cpi, x, xd, mi_row, mi_col); - update_state_sb_supertx(cpi, td, tile, mi_row, mi_col, bsize, 1, pc_tree); - av1_setup_dst_planes(xd->plane, bsize, get_frame_new_buffer(cm), mi_row, - mi_col); - for (plane = 0; plane < MAX_MB_PLANE; plane++) { - dst_buf[plane] = xd->plane[plane].dst.buf; - dst_stride[plane] = xd->plane[plane].dst.stride; - } - predict_sb_complex(cpi, td, tile, mi_row, mi_col, mi_row, mi_col, 1, bsize, - bsize, dst_buf, dst_stride, pc_tree); - - set_offsets_without_segment_id(cpi, tile, x, mi_row, mi_col, bsize); - set_segment_id_supertx(cpi, x, mi_row, mi_col, bsize); - - mbmi = &xd->mi[0]->mbmi; - best_tx_nostx = mbmi->tx_type; - - *best_tx = DCT_DCT; - - // chroma - skippable_uv = 1; - rate_uv = 0; - dist_uv = 0; - sse_uv = 0; - for (plane = 1; plane < MAX_MB_PLANE; ++plane) { -#if CONFIG_VAR_TX - ENTROPY_CONTEXT ctxa[2 * MAX_MIB_SIZE]; - ENTROPY_CONTEXT ctxl[2 * MAX_MIB_SIZE]; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - RD_STATS this_rd_stats; - av1_init_rd_stats(&this_rd_stats); - - tx_size = max_txsize_lookup[bsize]; - tx_size = - uv_txsize_lookup[bsize][tx_size][cm->subsampling_x][cm->subsampling_y]; - av1_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl); - - av1_subtract_plane(x, bsize, plane); - av1_tx_block_rd_b(cpi, x, tx_size, 0, 0, plane, 0, - get_plane_block_size(bsize, pd), &ctxa[0], &ctxl[0], - &this_rd_stats); - - this_rate = this_rd_stats.rate; - this_dist = this_rd_stats.dist; - pnsse = this_rd_stats.sse; - pnskip = this_rd_stats.skip; -#else - tx_size = max_txsize_lookup[bsize]; - tx_size = - uv_txsize_lookup[bsize][tx_size][cm->subsampling_x][cm->subsampling_y]; - av1_subtract_plane(x, bsize, plane); - av1_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip, - &pnsse, INT64_MAX, 
plane, bsize, tx_size, 0); -#endif // CONFIG_VAR_TX - - rate_uv += this_rate; - dist_uv += this_dist; - sse_uv += pnsse; - skippable_uv &= pnskip; - } - - // luma - tx_size = max_txsize_lookup[bsize]; - av1_subtract_plane(x, bsize, 0); -#if CONFIG_EXT_TX - int ext_tx_set = get_ext_tx_set(tx_size, bsize, 1, cm->reduced_tx_set_used); - const TxSetType tx_set_type = - get_ext_tx_set_type(tx_size, bsize, 1, cm->reduced_tx_set_used); -#endif // CONFIG_EXT_TX - for (tx_type = DCT_DCT; tx_type < TX_TYPES; ++tx_type) { -#if CONFIG_VAR_TX - ENTROPY_CONTEXT ctxa[2 * MAX_MIB_SIZE]; - ENTROPY_CONTEXT ctxl[2 * MAX_MIB_SIZE]; - const struct macroblockd_plane *const pd = &xd->plane[0]; - RD_STATS this_rd_stats; -#endif // CONFIG_VAR_TX - -#if CONFIG_EXT_TX - if (!av1_ext_tx_used[tx_set_type][tx_type]) continue; -#else - if (tx_size >= TX_32X32 && tx_type != DCT_DCT) continue; -#endif // CONFIG_EXT_TX - mbmi->tx_type = tx_type; - -#if CONFIG_VAR_TX - av1_init_rd_stats(&this_rd_stats); - av1_get_entropy_contexts(bsize, tx_size, pd, ctxa, ctxl); - av1_tx_block_rd_b(cpi, x, tx_size, 0, 0, 0, 0, bsize, &ctxa[0], &ctxl[0], - &this_rd_stats); - - this_rate = this_rd_stats.rate; - this_dist = this_rd_stats.dist; - pnsse = this_rd_stats.sse; - pnskip = this_rd_stats.skip; -#else - av1_txfm_rd_in_plane_supertx(x, cpi, &this_rate, &this_dist, &pnskip, - &pnsse, INT64_MAX, 0, bsize, tx_size, 0); -#endif // CONFIG_VAR_TX - -#if CONFIG_EXT_TX - if (get_ext_tx_types(tx_size, bsize, 1, cm->reduced_tx_set_used) > 1 && - !xd->lossless[xd->mi[0]->mbmi.segment_id] && this_rate != INT_MAX) { - if (ext_tx_set > 0) - this_rate += - x->inter_tx_type_costs[ext_tx_set][mbmi->tx_size][mbmi->tx_type]; - } -#else - if (tx_size < TX_32X32 && !xd->lossless[xd->mi[0]->mbmi.segment_id] && - this_rate != INT_MAX) { - this_rate += x->inter_tx_type_costs[tx_size][mbmi->tx_type]; - } -#endif // CONFIG_EXT_TX - *tmp_rate = rate_uv + this_rate; - *tmp_dist = dist_uv + this_dist; - sse = sse_uv + pnsse; - skippable = skippable_uv && pnskip; - if (skippable) { - *tmp_rate = av1_cost_bit(av1_get_skip_prob(cm, xd), 1); - x->skip = 1; - } else { - if (RDCOST(x->rdmult, *tmp_rate, *tmp_dist) < RDCOST(x->rdmult, 0, sse)) { - *tmp_rate += av1_cost_bit(av1_get_skip_prob(cm, xd), 0); - x->skip = 0; - } else { - *tmp_dist = sse; - *tmp_rate = av1_cost_bit(av1_get_skip_prob(cm, xd), 1); - x->skip = 1; - } - } - *tmp_rate += base_rate; - rd_tx = RDCOST(x->rdmult, *tmp_rate, *tmp_dist); - if (rd_tx < bestrd_tx * 0.99 || tx_type == DCT_DCT) { - *best_tx = tx_type; - bestrd_tx = rd_tx; - tmp_rate_tx = *tmp_rate; - tmp_dist_tx = *tmp_dist; - skip_tx = x->skip; - } - } - *tmp_rate = tmp_rate_tx; - *tmp_dist = tmp_dist_tx; - x->skip = skip_tx; -#if CONFIG_VAR_TX - for (plane = 0; plane < 1; ++plane) - memset(x->blk_skip[plane], x->skip, - sizeof(uint8_t) * pc_tree->none.num_4x4_blk); -#endif // CONFIG_VAR_TX - xd->mi[0]->mbmi.tx_type = best_tx_nostx; -} -#endif // CONFIG_SUPERTX diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c index f35ce8a4f..96d2c0fb2 100644 --- a/av1/encoder/encodemb.c +++ b/av1/encoder/encodemb.c @@ -989,34 +989,6 @@ void av1_encode_sb(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row, } } -#if CONFIG_SUPERTX -void av1_encode_sb_supertx(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize) { - MACROBLOCKD *const xd = &x->e_mbd; - struct optimize_ctx ctx; - MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi; - struct encode_b_args arg = { cm, x, &ctx, &mbmi->skip, NULL, NULL, 1 }; - int plane; - - mbmi->skip = 1; - if (x->skip) return; - 
- for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - const struct macroblockd_plane *const pd = &xd->plane[plane]; -#if CONFIG_VAR_TX - const TX_SIZE tx_size = TX_4X4; -#else - const TX_SIZE tx_size = av1_get_tx_size(plane, xd); -#endif - av1_subtract_plane(x, bsize, plane); - av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane], ctx.tl[plane]); - arg.ta = ctx.ta[plane]; - arg.tl = ctx.tl[plane]; - av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block, - &arg); - } -} -#endif // CONFIG_SUPERTX - #if !CONFIG_PVQ void av1_set_txb_context(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l) { diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h index c817a94f0..1c0b66f33 100644 --- a/av1/encoder/encodemb.h +++ b/av1/encoder/encodemb.h @@ -45,9 +45,6 @@ typedef enum AV1_XFORM_QUANT { void av1_encode_sb(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize, int mi_row, int mi_col); -#if CONFIG_SUPERTX -void av1_encode_sb_supertx(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize); -#endif // CONFIG_SUPERTX void av1_encode_sby_pass1(AV1_COMMON *cm, MACROBLOCK *x, BLOCK_SIZE bsize); void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block, int blk_row, int blk_col, BLOCK_SIZE plane_bsize, diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c index 9525ad329..1efaf26f5 100644 --- a/av1/encoder/firstpass.c +++ b/av1/encoder/firstpass.c @@ -687,9 +687,6 @@ void av1_first_pass(AV1_COMP *cpi, const struct lookahead_entry *source) { // Do intra 16x16 prediction. xd->mi[0]->mbmi.segment_id = 0; -#if CONFIG_SUPERTX - xd->mi[0]->mbmi.segment_id_supertx = 0; -#endif // CONFIG_SUPERTX xd->lossless[xd->mi[0]->mbmi.segment_id] = (qindex == 0); xd->mi[0]->mbmi.mode = DC_PRED; xd->mi[0]->mbmi.tx_size = diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c index 639803da0..399db7f6b 100644 --- a/av1/encoder/rdopt.c +++ b/av1/encoder/rdopt.c @@ -1414,10 +1414,10 @@ static int cost_coeffs(const AV1_COMMON *const cm, MACROBLOCK *x, int plane, const int cat6_bits = av1_get_cat6_extrabits_size(tx_size, 8); #endif // CONFIG_HIGHBITDEPTH -#if !CONFIG_VAR_TX && !CONFIG_SUPERTX +#if !CONFIG_VAR_TX // Check for consistency of tx_size with mode info assert(tx_size == av1_get_tx_size(plane, xd)); -#endif // !CONFIG_VAR_TX && !CONFIG_SUPERTX +#endif // !CONFIG_VAR_TX (void)cm; if (eob == 0) { @@ -1870,9 +1870,9 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col, x->tune_metric != AOM_TUNE_PSNR; #endif // CONFIG_DIST_8X8 -#if !CONFIG_SUPERTX && !CONFIG_VAR_TX +#if !CONFIG_VAR_TX assert(tx_size == av1_get_tx_size(plane, xd)); -#endif // !CONFIG_SUPERTX +#endif // !CONFIG_VAR_TX av1_init_rd_stats(&this_rd_stats); @@ -2129,46 +2129,6 @@ static void txfm_rd_in_plane(MACROBLOCK *x, const AV1_COMP *cpi, } } -#if CONFIG_SUPERTX -void av1_txfm_rd_in_plane_supertx(MACROBLOCK *x, const AV1_COMP *cpi, int *rate, - int64_t *distortion, int *skippable, - int64_t *sse, int64_t ref_best_rd, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size, - int use_fast_coef_casting) { - MACROBLOCKD *const xd = &x->e_mbd; - const struct macroblockd_plane *const pd = &xd->plane[plane]; - struct rdcost_block_args args; - av1_zero(args); - args.cpi = cpi; - args.x = x; - args.best_rd = ref_best_rd; - args.use_fast_coef_costing = use_fast_coef_casting; - -#if CONFIG_EXT_TX - assert(tx_size < TX_SIZES); -#endif // CONFIG_EXT_TX - - if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size; - - av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, 
args.t_left); - - block_rd_txfm(plane, 0, 0, 0, get_plane_block_size(bsize, pd), tx_size, - &args); - - if (args.exit_early) { - *rate = INT_MAX; - *distortion = INT64_MAX; - *sse = INT64_MAX; - *skippable = 0; - } else { - *distortion = args.rd_stats.dist; - *rate = args.rd_stats.rate; - *sse = args.rd_stats.sse; - *skippable = !x->plane[plane].eobs[0]; - } -} -#endif // CONFIG_SUPERTX - static int tx_size_cost(const AV1_COMP *const cpi, const MACROBLOCK *const x, BLOCK_SIZE bsize, TX_SIZE tx_size) { const AV1_COMMON *const cm = &cpi->common; @@ -8278,12 +8238,8 @@ static int64_t build_and_cost_compound_type( mbmi->mv[0].as_int = cur_mv[0].as_int; mbmi->mv[1].as_int = cur_mv[1].as_int; *out_rate_mv = rate_mv; - av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - preds0, strides, preds1, - strides); + av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0, strides, + preds1, strides); } av1_subtract_plane(x, bsize, 0); rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum, @@ -8293,11 +8249,8 @@ static int64_t build_and_cost_compound_type( best_rd_cur = rd; } else { - av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, -#if CONFIG_SUPERTX - 0, 0, -#endif // CONFIG_SUPERTX - preds0, strides, preds1, strides); + av1_build_wedge_inter_predictor_from_buf(xd, bsize, 0, 0, preds0, strides, + preds1, strides); av1_subtract_plane(x, bsize, 0); rd = estimate_yrd_for_sb(cpi, bsize, x, &rate_sum, &dist_sum, &tmp_skip_txfm_sb, &tmp_skip_sse_sb, INT64_MAX); @@ -10065,11 +10018,8 @@ static void pick_filter_intra_interframe( PALETTE_MODE_INFO *pmi_uv, int palette_ctx, int skip_mask, unsigned int *ref_costs_single, int64_t *best_rd, int64_t *best_intra_rd, PREDICTION_MODE *best_intra_mode, int *best_mode_index, int *best_skip2, - int *best_mode_skippable, -#if CONFIG_SUPERTX - int *returnrate_nocoef, -#endif // CONFIG_SUPERTX - int64_t *best_pred_rd, MB_MODE_INFO *best_mbmode, RD_STATS *rd_cost) { + int *best_mode_skippable, int64_t *best_pred_rd, MB_MODE_INFO *best_mbmode, + RD_STATS *rd_cost) { const AV1_COMMON *const cm = &cpi->common; MACROBLOCKD *const xd = &x->e_mbd; MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; @@ -10197,15 +10147,6 @@ static void pick_filter_intra_interframe( *best_mode_index = dc_mode_index; mbmi->mv[0].as_int = 0; rd_cost->rate = rate2; -#if CONFIG_SUPERTX - if (x->skip) - *returnrate_nocoef = rate2; - else - *returnrate_nocoef = rate2 - rate_y - rate_uv; - *returnrate_nocoef -= av1_cost_bit(av1_get_skip_prob(cm, xd), skippable); - *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd), - mbmi->ref_frame[0] != INTRA_FRAME); -#endif // CONFIG_SUPERTX rd_cost->dist = distortion2; rd_cost->rdcost = this_rd; *best_rd = this_rd; @@ -10226,12 +10167,8 @@ static void calc_target_weighted_pred(const AV1_COMMON *cm, const MACROBLOCK *x, void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data, MACROBLOCK *x, int mi_row, int mi_col, - RD_STATS *rd_cost, -#if CONFIG_SUPERTX - int *returnrate_nocoef, -#endif // CONFIG_SUPERTX - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far) { + RD_STATS *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far) { const AV1_COMMON *const cm = &cpi->common; const RD_OPT *const rd_opt = &cpi->rd; const SPEED_FEATURES *const sf = &cpi->sf; @@ -10388,9 +10325,6 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data, } rd_cost->rate = INT_MAX; -#if CONFIG_SUPERTX - *returnrate_nocoef = 
INT_MAX; -#endif // CONFIG_SUPERTX for (ref_frame = LAST_FRAME; ref_frame <= ALTREF_FRAME; ++ref_frame) { x->pred_mv_sad[ref_frame] = INT_MAX; @@ -11455,38 +11389,6 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data, } rd_cost->rate = rate2; -#if CONFIG_SUPERTX - if (x->skip) - *returnrate_nocoef = rate2; - else - *returnrate_nocoef = rate2 - rate_y - rate_uv; - *returnrate_nocoef -= av1_cost_bit( - av1_get_skip_prob(cm, xd), disable_skip || skippable || this_skip2); - *returnrate_nocoef -= av1_cost_bit(av1_get_intra_inter_prob(cm, xd), - mbmi->ref_frame[0] != INTRA_FRAME); -#if CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION -#if CONFIG_WARPED_MOTION - set_ref_ptrs(cm, xd, mbmi->ref_frame[0], mbmi->ref_frame[1]); -#endif -#if CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION - MODE_INFO *const mi = xd->mi[0]; - const MOTION_MODE motion_allowed = motion_mode_allowed( -#if CONFIG_GLOBAL_MOTION - 0, xd->global_motion, -#endif // CONFIG_GLOBAL_MOTION -#if CONFIG_WARPED_MOTION - xd, -#endif - mi); - if (motion_allowed == WARPED_CAUSAL) - *returnrate_nocoef -= x->motion_mode_cost[bsize][mbmi->motion_mode]; - else if (motion_allowed == OBMC_CAUSAL) - *returnrate_nocoef -= x->motion_mode_cost1[bsize][mbmi->motion_mode]; -#else - *returnrate_nocoef -= x->motion_mode_cost[bsize][mbmi->motion_mode]; -#endif // CONFIG_MOTION_VAR && CONFIG_WARPED_MOTION -#endif // CONFIG_MOTION_VAR || CONFIG_WARPED_MOTION -#endif // CONFIG_SUPERTX rd_cost->dist = distortion2; rd_cost->rdcost = this_rd; best_rd = this_rd; @@ -11644,9 +11546,6 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data, // Only try palette mode when the best mode so far is an intra mode. if (try_palette && !is_inter_mode(best_mbmode.mode)) { int rate2 = 0; -#if CONFIG_SUPERTX - int best_rate_nocoef; -#endif // CONFIG_SUPERTX int64_t distortion2 = 0, best_rd_palette = best_rd, this_rd, best_model_rd_palette = INT64_MAX; int skippable = 0, rate_overhead_palette = 0; @@ -11709,14 +11608,8 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data, if (skippable) { rate2 -= (rd_stats_y.rate + rate_uv_tokenonly[uv_tx]); -#if CONFIG_SUPERTX - best_rate_nocoef = rate2; -#endif // CONFIG_SUPERTX rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1); } else { -#if CONFIG_SUPERTX - best_rate_nocoef = rate2 - (rd_stats_y.rate + rate_uv_tokenonly[uv_tx]); -#endif // CONFIG_SUPERTX rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0); } this_rd = RDCOST(x->rdmult, rate2, distortion2); @@ -11724,9 +11617,6 @@ void av1_rd_pick_inter_mode_sb(const AV1_COMP *cpi, TileDataEnc *tile_data, best_mode_index = 3; mbmi->mv[0].as_int = 0; rd_cost->rate = rate2; -#if CONFIG_SUPERTX - *returnrate_nocoef = best_rate_nocoef; -#endif // CONFIG_SUPERTX rd_cost->dist = distortion2; rd_cost->rdcost = this_rd; best_rd = this_rd; @@ -11751,9 +11641,6 @@ PALETTE_EXIT: #endif // CONFIG_EXT_INTRA pmi_uv, palette_ctx, 0, ref_costs_single, &best_rd, &best_intra_rd, &best_intra_mode, &best_mode_index, &best_skip2, &best_mode_skippable, -#if CONFIG_SUPERTX - returnrate_nocoef, -#endif // CONFIG_SUPERTX best_pred_rd, &best_mbmode, rd_cost); } #endif // CONFIG_FILTER_INTRA diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h index dbc7527fb..77953520f 100644 --- a/av1/encoder/rdopt.h +++ b/av1/encoder/rdopt.h @@ -99,12 +99,8 @@ unsigned int av1_high_get_sby_perpixel_variance(const AV1_COMP *cpi, void av1_rd_pick_inter_mode_sb(const struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x, int mi_row, int mi_col, - 
struct RD_STATS *rd_cost, -#if CONFIG_SUPERTX - int *returnrate_nocoef, -#endif // CONFIG_SUPERTX - BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx, - int64_t best_rd_so_far); + struct RD_STATS *rd_cost, BLOCK_SIZE bsize, + PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far); void av1_rd_pick_inter_mode_sb_seg_skip( const struct AV1_COMP *cpi, struct TileDataEnc *tile_data, @@ -121,21 +117,6 @@ void av1_check_ncobmc_rd(const struct AV1_COMP *cpi, struct macroblock *x, int mi_row, int mi_col); #endif // CONFIG_MOTION_VAR && CONFIG_NCOBMC -#if CONFIG_SUPERTX -#if CONFIG_VAR_TX -void av1_tx_block_rd_b(const AV1_COMP *cpi, MACROBLOCK *x, TX_SIZE tx_size, - int blk_row, int blk_col, int plane, int block, - int plane_bsize, const ENTROPY_CONTEXT *a, - const ENTROPY_CONTEXT *l, RD_STATS *rd_stats); -#endif - -void av1_txfm_rd_in_plane_supertx(MACROBLOCK *x, const AV1_COMP *cpi, int *rate, - int64_t *distortion, int *skippable, - int64_t *sse, int64_t ref_best_rd, int plane, - BLOCK_SIZE bsize, TX_SIZE tx_size, - int use_fast_coef_casting); -#endif // CONFIG_SUPERTX - #ifdef __cplusplus } // extern "C" #endif diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c index a2e24d66b..1680d78b7 100644 --- a/av1/encoder/tokenize.c +++ b/av1/encoder/tokenize.c @@ -509,11 +509,7 @@ static void tokenize_b(int plane, int block, int blk_row, int blk_col, const int eob = p->eobs[block]; const PLANE_TYPE type = pd->plane_type; const tran_low_t *qcoeff = BLOCK_OFFSET(p->qcoeff, block); -#if CONFIG_SUPERTX - const int segment_id = AOMMIN(mbmi->segment_id, mbmi->segment_id_supertx); -#else const int segment_id = mbmi->segment_id; -#endif // CONFIG_SUEPRTX const int16_t *scan, *nb; const TX_TYPE tx_type = av1_get_tx_type(type, xd, blk_row, blk_col, block, tx_size); @@ -881,48 +877,3 @@ void av1_tokenize_sb(const AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t, if (rate) *rate += arg.this_rate; } - -#if CONFIG_SUPERTX -void av1_tokenize_sb_supertx(const AV1_COMP *cpi, ThreadData *td, - TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row, - int mi_col, BLOCK_SIZE bsize, int *rate) { - const AV1_COMMON *const cm = &cpi->common; - MACROBLOCKD *const xd = &td->mb.e_mbd; - MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi; - TOKENEXTRA *t_backup = *t; - const int ctx = av1_get_skip_context(xd); - const int skip_inc = - !segfeature_active(&cm->seg, mbmi->segment_id_supertx, SEG_LVL_SKIP); - struct tokenize_b_args arg = { cpi, td, t, 0 }; - if (mbmi->skip) { - if (!dry_run) td->counts->skip[ctx][1] += skip_inc; - av1_reset_skip_context(xd, mi_row, mi_col, bsize); - if (dry_run) *t = t_backup; - return; - } - - if (!dry_run) { - int plane; - td->counts->skip[ctx][0] += skip_inc; - - for (plane = 0; plane < MAX_MB_PLANE; ++plane) { - av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b, - &arg); - (*t)->token = EOSB_TOKEN; - (*t)++; - } - } else if (dry_run == DRY_RUN_NORMAL) { - int plane; - for (plane = 0; plane < MAX_MB_PLANE; ++plane) - av1_foreach_transformed_block_in_plane(xd, bsize, plane, - set_entropy_context_b, &arg); - *t = t_backup; - } else if (dry_run == DRY_RUN_COSTCOEFFS) { - int plane; - for (plane = 0; plane < MAX_MB_PLANE; ++plane) - av1_foreach_transformed_block_in_plane(xd, bsize, plane, cost_coeffs_b, - &arg); - } - if (rate) *rate += arg.this_rate; -} -#endif // CONFIG_SUPERTX diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h index 20000e502..92d54034d 100644 --- a/av1/encoder/tokenize.h +++ b/av1/encoder/tokenize.h @@ -85,11 +85,6 @@ void av1_tokenize_color_map(const MACROBLOCK *const x, int 
plane, int block, void av1_tokenize_sb(const struct AV1_COMP *cpi, struct ThreadData *td, TOKENEXTRA **t, RUN_TYPE dry_run, BLOCK_SIZE bsize, int *rate, const int mi_row, const int mi_col); -#if CONFIG_SUPERTX -void av1_tokenize_sb_supertx(const struct AV1_COMP *cpi, struct ThreadData *td, - TOKENEXTRA **t, RUN_TYPE dry_run, int mi_row, - int mi_col, BLOCK_SIZE bsize, int *rate); -#endif extern const int16_t *av1_dct_value_cost_ptr; /* TODO: The Token field should be broken out into a separate char array to diff --git a/build/cmake/aom_config_defaults.cmake b/build/cmake/aom_config_defaults.cmake index fe1fe0e28..947427353 100644 --- a/build/cmake/aom_config_defaults.cmake +++ b/build/cmake/aom_config_defaults.cmake @@ -199,7 +199,6 @@ set(CONFIG_SEGMENT_ZEROMV 0 CACHE NUMBER "AV1 experiment flag.") set(CONFIG_SIMPLE_BWD_ADAPT 1 CACHE NUMBER "AV1 experiment flag.") set(CONFIG_SMOOTH_HV 1 CACHE NUMBER "AV1 experiment flag.") set(CONFIG_STRIPED_LOOP_RESTORATION 0 CACHE NUMBER "AV1 experiment flag.") -set(CONFIG_SUPERTX 0 CACHE NUMBER "AV1 experiment flag.") set(CONFIG_TEMPMV_SIGNALING 1 CACHE NUMBER "AV1 experiment flag.") set(CONFIG_TMV 0 CACHE NUMBER "AV1 experiment flag.") set(CONFIG_TPL_MV 0 CACHE NUMBER "AV1 experiment flag.") diff --git a/build/cmake/aom_experiment_deps.cmake b/build/cmake/aom_experiment_deps.cmake index 7776aeeb3..cd77f9955 100644 --- a/build/cmake/aom_experiment_deps.cmake +++ b/build/cmake/aom_experiment_deps.cmake @@ -89,13 +89,6 @@ macro (fix_experiment_configs) endif () endif () - if (CONFIG_EXT_PARTITION_TYPES) - if (CONFIG_SUPERTX) - change_config_and_warn(CONFIG_SUPERTX 0 - CONFIG_EXT_PARTITION_TYPES) - endif () - endif () - if (CONFIG_JNT_COMP) if (NOT CONFIG_FRAME_MARKER) change_config_and_warn(CONFIG_FRAME_MARKER 1 CONFIG_JNT_COMP) diff --git a/configure b/configure index fce2ec8e6..e21c7e4b1 100755 --- a/configure +++ b/configure @@ -267,7 +267,6 @@ EXPERIMENT_LIST=" compound_segment global_motion new_quant - supertx ans loop_restoration striped_loop_restoration @@ -562,7 +561,6 @@ post_process_cmdline() { enabled pvq && disable_feature lgt enabled pvq && disable_feature mrc_tx enabled lv_map && disable_feature mrc_tx - enabled supertx && disable_feature mrc_tx enabled coef_interleave && disable_feature mrc_tx enabled pvq && disable_feature palette_throughput enabled mrc_tx && enable_feature ext_tx @@ -621,11 +619,6 @@ post_process_cmdline() { log_echo "disabling fp_mb_stats" disable_feature fp_mb_stats fi - if enabled supertx; then - log_echo "ext_partition_types not compatible with supertx;" - log_echo "disabling supertx" - disable_feature supertx - fi if ! 
enabled rect_tx; then log_echo "ext_partition_types requires rect_tx;" log_echo "enabling rect_tx;" diff --git a/tools/aom_entropy_optimizer.c b/tools/aom_entropy_optimizer.c index 1a3b17e23..d68c66af1 100644 --- a/tools/aom_entropy_optimizer.c +++ b/tools/aom_entropy_optimizer.c @@ -722,19 +722,6 @@ int main(int argc, const char **argv) { #endif /* supertx experiment */ -#if CONFIG_SUPERTX - cts_each_dim[0] = PARTITION_SUPERTX_CONTEXTS; - cts_each_dim[1] = TX_SIZES; - cts_each_dim[2] = 2; - optimize_entropy_table( - &fc.supertx[0][0][0], probsfile, 3, cts_each_dim, NULL, 1, - "static const aom_prob\n" - "default_supertx_prob[PARTITION_SUPERTX_CONTEXTS][TX_SIZES]"); - optimize_cdf_table(&fc.supertx[0][0][0], probsfile, 3, cts_each_dim, - "static const aom_cdf_prob " - "default_supertx_cdf[PARTITION_SUPERTX_CONTEXTS][TX_SIZES]" - "[CDF_SIZE(2)]"); -#endif /* filter_intra experiment */ #if CONFIG_FILTER_INTRA -- GitLab