Commit 7fc6b2ac authored by Sebastien Alaiwan

Remove LGT_FROM_PRED experiment

This experiment has been abandoned for AV1.

Change-Id: I18cf1354df928a0614a1e58b718cd96ee7999925
parent b4f31036
@@ -331,9 +331,6 @@ typedef struct MB_MODE_INFO {
#if CONFIG_TXK_SEL
TX_TYPE txk_type[MAX_SB_SQUARE / (TX_SIZE_W_MIN * TX_SIZE_H_MIN)];
#endif
#if CONFIG_LGT_FROM_PRED
int use_lgt;
#endif
#if CONFIG_FILTER_INTRA
FILTER_INTRA_MODE_INFO filter_intra_mode_info;
@@ -950,36 +947,6 @@ static INLINE int get_ext_tx_types(TX_SIZE tx_size, BLOCK_SIZE bs, int is_inter,
return av1_num_ext_tx_set[set_type];
}
#if CONFIG_LGT_FROM_PRED
static INLINE int is_lgt_allowed(PREDICTION_MODE mode, TX_SIZE tx_size) {
if (!LGT_FROM_PRED_INTRA && !is_inter_mode(mode)) return 0;
if (!LGT_FROM_PRED_INTER && is_inter_mode(mode)) return 0;
switch (mode) {
case D45_PRED:
case D63_PRED:
case D117_PRED:
case V_PRED:
#if CONFIG_SMOOTH_HV
case SMOOTH_V_PRED:
#endif
return tx_size_wide[tx_size] <= 8;
case D135_PRED:
case D153_PRED:
case D207_PRED:
case H_PRED:
#if CONFIG_SMOOTH_HV
case SMOOTH_H_PRED:
#endif
return tx_size_high[tx_size] <= 8;
case DC_PRED:
case SMOOTH_PRED: return 0;
case PAETH_PRED:
default: return tx_size_wide[tx_size] <= 8 || tx_size_high[tx_size] <= 8;
}
}
#endif // CONFIG_LGT_FROM_PRED
static INLINE int is_rect_tx_allowed_bsize(BLOCK_SIZE bsize) {
static const char LUT[BLOCK_SIZES_ALL] = {
0, // BLOCK_2X2
@@ -1976,23 +1976,6 @@ static const aom_prob default_compound_idx_probs[COMP_INDEX_CONTEXTS] = {
};
#endif // CONFIG_JNT_COMP
#if CONFIG_LGT_FROM_PRED
static const aom_prob default_intra_lgt_prob[LGT_SIZES][INTRA_MODES] = {
{ 255, 208, 208, 180, 230, 208, 194, 214, 220, 255,
#if CONFIG_SMOOTH_HV
220, 220,
#endif
230 },
{ 255, 192, 216, 180, 180, 180, 180, 200, 200, 255,
#if CONFIG_SMOOTH_HV
220, 220,
#endif
222 },
};
static const aom_prob default_inter_lgt_prob[LGT_SIZES] = { 230, 230 };
#endif // CONFIG_LGT_FROM_PRED
#if CONFIG_FILTER_INTRA
static const aom_prob default_filter_intra_probs[2] = { 103, 231 };
@@ -5837,10 +5820,6 @@ static void init_mode_probs(FRAME_CONTEXT *fc) {
av1_copy(fc->filter_intra_probs, default_filter_intra_probs);
av1_copy(fc->filter_intra_mode_cdf, default_filter_intra_mode_cdf);
#endif // CONFIG_FILTER_INTRA
#if CONFIG_LGT_FROM_PRED
av1_copy(fc->intra_lgt_prob, default_intra_lgt_prob);
av1_copy(fc->inter_lgt_prob, default_inter_lgt_prob);
#endif // CONFIG_LGT_FROM_PRED
#if CONFIG_LOOP_RESTORATION
av1_copy(fc->switchable_restore_cdf, default_switchable_restore_cdf);
#if CONFIG_NEW_MULTISYMBOL
@@ -6033,23 +6012,6 @@ void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
av1_mode_mv_merge_probs(pre_fc->skip_probs[i], counts->skip[i]);
#endif // !CONFIG_NEW_MULTISYMBOL
#if CONFIG_LGT_FROM_PRED
int j;
if (LGT_FROM_PRED_INTRA) {
for (i = TX_4X4; i < LGT_SIZES; ++i) {
for (j = 0; j < INTRA_MODES; ++j)
fc->intra_lgt_prob[i][j] = av1_mode_mv_merge_probs(
pre_fc->intra_lgt_prob[i][j], counts->intra_lgt[i][j]);
}
}
if (LGT_FROM_PRED_INTER) {
for (i = TX_4X4; i < LGT_SIZES; ++i) {
fc->inter_lgt_prob[i] = av1_mode_mv_merge_probs(pre_fc->inter_lgt_prob[i],
counts->inter_lgt[i]);
}
}
#endif // CONFIG_LGT_FROM_PRED
if (cm->seg.temporal_update) {
for (i = 0; i < PREDICTION_PROBS; i++)
fc->seg.pred_probs[i] = av1_mode_mv_merge_probs(pre_fc->seg.pred_probs[i],
@@ -346,10 +346,6 @@ typedef struct frame_contexts {
[CDF_SIZE(TX_TYPES)];
aom_cdf_prob inter_ext_tx_cdf[EXT_TX_SETS_INTER][EXT_TX_SIZES]
[CDF_SIZE(TX_TYPES)];
#if CONFIG_LGT_FROM_PRED
aom_prob intra_lgt_prob[LGT_SIZES][INTRA_MODES];
aom_prob inter_lgt_prob[LGT_SIZES];
#endif // CONFIG_LGT_FROM_PRED
aom_prob delta_q_prob[DELTA_Q_PROBS];
#if CONFIG_EXT_DELTA_Q
#if CONFIG_LOOPFILTER_LEVEL
@@ -468,10 +464,6 @@ typedef struct FRAME_COUNTS {
#if CONFIG_INTRABC
nmv_context_counts dv;
#endif
#if CONFIG_LGT_FROM_PRED
unsigned int intra_lgt[LGT_SIZES][INTRA_MODES][2];
unsigned int inter_lgt[LGT_SIZES][2];
#endif // CONFIG_LGT_FROM_PRED
unsigned int delta_q[DELTA_Q_PROBS][2];
#if CONFIG_EXT_DELTA_Q
#if CONFIG_LOOPFILTER_LEVEL
@@ -728,15 +728,6 @@ typedef enum {
} METADATA_TYPE;
#endif
#if CONFIG_LGT_FROM_PRED
#define LGT_SIZES 2
// Note: at least one of LGT_FROM_PRED_INTRA and LGT_FROM_PRED_INTER must be 1
#define LGT_FROM_PRED_INTRA 1
#define LGT_FROM_PRED_INTER 1
// LGT_SL_INTRA: LGTs with a mode-dependent first self-loop and a break point
#define LGT_SL_INTRA 0
#endif // CONFIG_LGT_FROM_PRED
#ifdef __cplusplus
} // extern "C"
#endif
@@ -39,15 +39,6 @@ int get_lgt8(const TxfmParam *txfm_param, int is_col,
const tran_high_t **lgtmtx);
#endif // CONFIG_LGT
#if CONFIG_LGT_FROM_PRED
void get_lgt4_from_pred(const TxfmParam *txfm_param, int is_col,
const tran_high_t **lgtmtx, int ntx);
void get_lgt8_from_pred(const TxfmParam *txfm_param, int is_col,
const tran_high_t **lgtmtx, int ntx);
void get_lgt16up_from_pred(const TxfmParam *txfm_param, int is_col,
const tran_high_t **lgtmtx, int ntx);
#endif // CONFIG_LGT_FROM_PRED
#if CONFIG_HIGHBITDEPTH
typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
@@ -68,9 +59,6 @@ void av1_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
TxfmParam *txfm_param);
void av1_inverse_transform_block(const MACROBLOCKD *xd,
const tran_low_t *dqcoeff,
#if CONFIG_LGT_FROM_PRED
PREDICTION_MODE mode,
#endif
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
uint8_t *mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
@@ -99,9 +99,6 @@ static INLINE const SCAN_ORDER *get_scan(const AV1_COMMON *cm, TX_SIZE tx_size,
// use the DCT_DCT scan order for MRC_DCT for now
if (tx_type == MRC_DCT) tx_type = DCT_DCT;
#endif // CONFIG_MRC_TX
#if CONFIG_LGT_FROM_PRED
if (mbmi->use_lgt) tx_type = DCT_DCT;
#endif
const int is_inter = is_inter_block(mbmi);
#if CONFIG_ADAPT_SCAN
(void)mbmi;
@@ -245,18 +245,12 @@ static void read_mv_probs(nmv_context *ctx, int allow_hp, aom_reader *r) {
#endif
static void inverse_transform_block(MACROBLOCKD *xd, int plane,
#if CONFIG_LGT_FROM_PRED
PREDICTION_MODE mode,
#endif
const TX_TYPE tx_type,
const TX_SIZE tx_size, uint8_t *dst,
int stride, int16_t scan_line, int eob) {
struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *const dqcoeff = pd->dqcoeff;
av1_inverse_transform_block(xd, dqcoeff,
#if CONFIG_LGT_FROM_PRED
mode,
#endif
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
xd->mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
@@ -304,11 +298,7 @@ static void predict_and_reconstruct_intra_block(
if (eob) {
uint8_t *dst =
&pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
inverse_transform_block(xd, plane,
#if CONFIG_LGT_FROM_PRED
mbmi->mode,
#endif
tx_type, tx_size, dst, pd->dst.stride,
inverse_transform_block(xd, plane, tx_type, tx_size, dst, pd->dst.stride,
max_scan_line, eob);
}
}
@@ -356,11 +346,7 @@ static void decode_reconstruct_tx(AV1_COMMON *cm, MACROBLOCKD *const xd,
cm, xd, plane, sc, blk_col, blk_row, plane_tx_size, tx_type,
&max_scan_line, r, mbmi->segment_id);
#endif // CONFIG_LV_MAP
inverse_transform_block(xd, plane,
#if CONFIG_LGT_FROM_PRED
mbmi->mode,
#endif
tx_type, plane_tx_size,
inverse_transform_block(xd, plane, tx_type, plane_tx_size,
&pd->dst.buf[(blk_row * pd->dst.stride + blk_col)
<< tx_size_wide_log2[0]],
pd->dst.stride, max_scan_line, eob);
@@ -928,9 +928,6 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
(void)block;
TX_TYPE *tx_type = &mbmi->txk_type[(blk_row << 4) + blk_col];
#endif
#if CONFIG_LGT_FROM_PRED
mbmi->use_lgt = 0;
#endif
if (!FIXED_TX_TYPE) {
const TX_SIZE square_tx_size = txsize_sqr_map[tx_size];
@@ -948,7 +945,6 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
// there is no need to read the tx_type
assert(eset != 0);
#if !CONFIG_LGT_FROM_PRED
if (inter_block) {
*tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
@@ -970,69 +966,6 @@ void av1_read_tx_type(const AV1_COMMON *const cm, MACROBLOCKD *xd,
av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
#endif
}
#else
// only signal tx_type when lgt is not allowed or not selected
if (inter_block) {
if (LGT_FROM_PRED_INTER) {
if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used) {
mbmi->use_lgt =
aom_read(r, ec_ctx->inter_lgt_prob[square_tx_size], ACCT_STR);
#if CONFIG_ENTROPY_STATS
if (counts) ++counts->inter_lgt[square_tx_size][mbmi->use_lgt];
#endif // CONFIG_ENTROPY_STATS
}
if (!mbmi->use_lgt) {
*tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
#if CONFIG_ENTROPY_STATS
if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
#endif // CONFIG_ENTROPY_STATS
} else {
*tx_type = DCT_DCT; // assign a dummy tx_type
}
} else {
*tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
#if CONFIG_ENTROPY_STATS
if (counts) ++counts->inter_ext_tx[eset][square_tx_size][*tx_type];
#endif // CONFIG_ENTROPY_STATS
}
} else if (ALLOW_INTRA_EXT_TX) {
if (LGT_FROM_PRED_INTRA) {
if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used) {
mbmi->use_lgt =
aom_read(r, ec_ctx->intra_lgt_prob[square_tx_size][mbmi->mode],
ACCT_STR);
#if CONFIG_ENTROPY_STATS
if (counts)
++counts->intra_lgt[square_tx_size][mbmi->mode][mbmi->use_lgt];
#endif // CONFIG_ENTROPY_STATS
}
if (!mbmi->use_lgt) {
*tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
#if CONFIG_ENTROPY_STATS
if (counts)
++counts
->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
#endif // CONFIG_ENTROPY_STATS
} else {
*tx_type = DCT_DCT; // assign a dummy tx_type
}
} else {
*tx_type = av1_ext_tx_inv[tx_set_type][aom_read_symbol(
r, ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
av1_num_ext_tx_set[tx_set_type], ACCT_STR)];
#if CONFIG_ENTROPY_STATS
if (counts)
++counts->intra_ext_tx[eset][square_tx_size][mbmi->mode][*tx_type];
#endif // CONFIG_ENTROPY_STATS
}
}
#endif // CONFIG_LGT_FROM_PRED
} else {
*tx_type = DCT_DCT;
}
@@ -1222,7 +1222,6 @@ void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd,
// is no need to send the tx_type
assert(eset > 0);
assert(av1_ext_tx_used[tx_set_type][tx_type]);
#if !CONFIG_LGT_FROM_PRED
if (is_inter) {
aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
@@ -1246,39 +1245,6 @@ void av1_write_tx_type(const AV1_COMMON *const cm, const MACROBLOCKD *xd,
av1_num_ext_tx_set[tx_set_type]);
#endif
}
#else
// only signal tx_type when lgt is not allowed or not selected
if (is_inter) {
if (LGT_FROM_PRED_INTER) {
if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used)
aom_write(w, mbmi->use_lgt, ec_ctx->inter_lgt_prob[square_tx_size]);
if (!mbmi->use_lgt)
aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
av1_num_ext_tx_set[tx_set_type]);
} else {
aom_write_symbol(w, av1_ext_tx_ind[tx_set_type][tx_type],
ec_ctx->inter_ext_tx_cdf[eset][square_tx_size],
av1_num_ext_tx_set[tx_set_type]);
}
} else if (ALLOW_INTRA_EXT_TX) {
if (LGT_FROM_PRED_INTRA) {
if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used)
aom_write(w, mbmi->use_lgt,
ec_ctx->intra_lgt_prob[square_tx_size][mbmi->mode]);
if (!mbmi->use_lgt)
aom_write_symbol(
w, av1_ext_tx_ind[tx_set_type][tx_type],
ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
av1_num_ext_tx_set[tx_set_type]);
} else {
aom_write_symbol(
w, av1_ext_tx_ind[tx_set_type][tx_type],
ec_ctx->intra_ext_tx_cdf[eset][square_tx_size][mbmi->mode],
av1_num_ext_tx_set[tx_set_type]);
}
}
#endif // CONFIG_LGT_FROM_PRED
}
}
}
@@ -258,10 +258,6 @@ struct macroblock {
int quarter_tx_size_cost[2];
#endif
int txfm_partition_cost[TXFM_PARTITION_CONTEXTS][2];
#if CONFIG_LGT_FROM_PRED
int intra_lgt_cost[LGT_SIZES][INTRA_MODES][2];
int inter_lgt_cost[LGT_SIZES][2];
#endif
int inter_tx_type_costs[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES];
int intra_tx_type_costs[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
[TX_TYPES];
@@ -1070,20 +1070,10 @@ static void get_masked_residual32(const int16_t **input, int *input_stride,
}
#endif // CONFIG_MRC_TX
#if CONFIG_LGT || CONFIG_LGT_FROM_PRED
#if CONFIG_LGT
static void flgt4(const tran_low_t *input, tran_low_t *output,
const tran_high_t *lgtmtx) {
if (!lgtmtx) assert(0);
#if CONFIG_LGT_FROM_PRED
// For DCT/ADST, use butterfly implementations
if (lgtmtx[0] == DCT4) {
fdct4(input, output);
return;
} else if (lgtmtx[0] == ADST4) {
fadst4(input, output);
return;
}
#endif // CONFIG_LGT_FROM_PRED
// evaluate s[j] = sum of all lgtmtx[j][i]*input[i] over i=1,...,4
tran_high_t s[4] = { 0 };
@@ -1096,16 +1086,6 @@ static void flgt8(const tran_low_t *input, tran_low_t *output,
static void flgt8(const tran_low_t *input, tran_low_t *output,
const tran_high_t *lgtmtx) {
if (!lgtmtx) assert(0);
#if CONFIG_LGT_FROM_PRED
// For DCT/ADST, use butterfly implementations
if (lgtmtx[0] == DCT8) {
fdct8(input, output);
return;
} else if (lgtmtx[0] == ADST8) {
fadst8(input, output);
return;
}
#endif // CONFIG_LGT_FROM_PRED
// evaluate s[j] = sum of all lgtmtx[j][i]*input[i] over i=1,...,8
tran_high_t s[8] = { 0 };
@@ -1114,140 +1094,7 @@ static void flgt8(const tran_low_t *input, tran_low_t *output,
for (int i = 0; i < 8; ++i) output[i] = (tran_low_t)fdct_round_shift(s[i]);
}
#endif // CONFIG_LGT || CONFIG_LGT_FROM_PRED
#if CONFIG_LGT_FROM_PRED
static void flgt16up(const tran_low_t *input, tran_low_t *output,
const tran_high_t *lgtmtx) {
if (lgtmtx[0] == DCT16) {
fdct16(input, output);
return;
} else if (lgtmtx[0] == ADST16) {
fadst16(input, output);
return;
} else if (lgtmtx[0] == DCT32) {
fdct32(input, output);
return;
} else if (lgtmtx[0] == ADST32) {
fhalfright32(input, output);
return;
} else {
assert(0);
}
}
typedef void (*FlgtFunc)(const tran_low_t *input, tran_low_t *output,
const tran_high_t *lgtmtx);
static FlgtFunc flgt_func[4] = { flgt4, flgt8, flgt16up, flgt16up };
typedef void (*GetLgtFunc)(const TxfmParam *txfm_param, int is_col,
const tran_high_t *lgtmtx[], int ntx);
static GetLgtFunc get_lgt_func[4] = { get_lgt4_from_pred, get_lgt8_from_pred,
get_lgt16up_from_pred,
get_lgt16up_from_pred };
// this inline function corresponds to the up scaling before the first
// transform in the av1_fht* functions
static INLINE tran_low_t fwd_upscale_wrt_txsize(const tran_high_t val,
const TX_SIZE tx_size) {
switch (tx_size) {
case TX_4X4: return (tran_low_t)val << 4;
case TX_8X8:
case TX_4X16:
case TX_16X4:
case TX_8X32:
case TX_32X8: return (tran_low_t)val << 2;
case TX_4X8:
case TX_8X4:
case TX_8X16:
case TX_16X8: return (tran_low_t)fdct_round_shift(val * 4 * Sqrt2);
default: assert(0); break;
}
return 0;
}
// This inline function corresponds to the bit shift after the second
// transform in the av1_fht* functions
static INLINE tran_low_t fwd_downscale_wrt_txsize(const tran_low_t val,
const TX_SIZE tx_size) {
switch (tx_size) {
case TX_4X4: return (val + 1) >> 2;
case TX_4X8:
case TX_8X4:
case TX_8X8:
case TX_4X16:
case TX_16X4: return (val + (val < 0)) >> 1;
case TX_8X16:
case TX_16X8: return val;
case TX_8X32:
case TX_32X8: return ROUND_POWER_OF_TWO_SIGNED(val, 2);
default: assert(0); break;
}
return 0;
}
void flgt2d_from_pred_c(const int16_t *input, tran_low_t *output, int stride,
TxfmParam *txfm_param) {
const TX_SIZE tx_size = txfm_param->tx_size;
const int w = tx_size_wide[tx_size];
const int h = tx_size_high[tx_size];
const int wlog2 = tx_size_wide_log2[tx_size];
const int hlog2 = tx_size_high_log2[tx_size];
assert(w <= 8 || h <= 8);
int i, j;
tran_low_t out[256]; // max size: 8x32 and 32x8
tran_low_t temp_in[32], temp_out[32];
const tran_high_t *lgtmtx_col[1];
const tran_high_t *lgtmtx_row[1];
get_lgt_func[hlog2 - 2](txfm_param, 1, lgtmtx_col, w);
get_lgt_func[wlog2 - 2](txfm_param, 0, lgtmtx_row, h);
// For forward transforms, to be consistent with av1_fht functions, we apply
// short transform first and long transform second.
if (w < h) {
// Row transforms
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j)
temp_in[j] = fwd_upscale_wrt_txsize(input[i * stride + j], tx_size);
flgt_func[wlog2 - 2](temp_in, temp_out, lgtmtx_row[0]);
// right shift of 2 bits here in fht8x16 and fht16x8
for (j = 0; j < w; ++j)
out[j * h + i] = (tx_size == TX_16X8 || tx_size == TX_8X16)
? ROUND_POWER_OF_TWO_SIGNED(temp_out[j], 2)
: temp_out[j];
}
// Column transforms
for (i = 0; i < w; ++i) {
for (j = 0; j < h; ++j) temp_in[j] = out[j + i * h];
flgt_func[hlog2 - 2](temp_in, temp_out, lgtmtx_col[0]);
for (j = 0; j < h; ++j)
output[j * w + i] = fwd_downscale_wrt_txsize(temp_out[j], tx_size);
}
} else {
// Column transforms
for (i = 0; i < w; ++i) {
for (j = 0; j < h; ++j)
temp_in[j] = fwd_upscale_wrt_txsize(input[j * stride + i], tx_size);
flgt_func[hlog2 - 2](temp_in, temp_out, lgtmtx_col[0]);
// fht8x16 and fht16x8 have right shift of 2 bits here
for (j = 0; j < h; ++j)
out[j * w + i] = (tx_size == TX_16X8 || tx_size == TX_8X16)
? ROUND_POWER_OF_TWO_SIGNED(temp_out[j], 2)
: temp_out[j];
}
// Row transforms
for (i = 0; i < h; ++i) {
for (j = 0; j < w; ++j) temp_in[j] = out[j + i * w];
flgt_func[wlog2 - 2](temp_in, temp_out, lgtmtx_row[0]);
for (j = 0; j < w; ++j)
output[j + i * w] = fwd_downscale_wrt_txsize(temp_out[j], tx_size);
}
}
}
#endif // CONFIG_LGT_FROM_PRED
#endif // CONFIG_LGT
// TODO(sarahparker) these functions will be removed once the highbitdepth
// codepath works properly for rectangular transforms. They have almost
@@ -4485,7 +4485,6 @@ void av1_update_tx_type_count(const AV1_COMMON *cm, MACROBLOCKD *xd,
const int eset =
get_ext_tx_set(tx_size, bsize, is_inter, cm->reduced_tx_set_used);
if (eset > 0) {
#if !CONFIG_LGT_FROM_PRED
const TxSetType tx_set_type = get_ext_tx_set_type(
tx_size, bsize, is_inter, cm->reduced_tx_set_used);
if (is_inter) {
@@ -4525,45 +4524,6 @@ void av1_update_tx_type_count(const AV1_COMMON *cm, MACROBLOCKD *xd,
av1_num_ext_tx_set[tx_set_type]);
#endif
}
#else
(void)tx_type;
(void)fc;
(void)allow_update_cdf;
if (is_inter) {
if (LGT_FROM_PRED_INTER) {
if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used)
++counts->inter_lgt[txsize_sqr_map[tx_size]][mbmi->use_lgt];
#if CONFIG_ENTROPY_STATS
if (!mbmi->use_lgt)
++counts->inter_ext_tx[eset][txsize_sqr_map[tx_size]][tx_type];
else
#endif // CONFIG_ENTROPY_STATS
mbmi->tx_type = DCT_DCT;
} else {
#if CONFIG_ENTROPY_STATS
++counts->inter_ext_tx[eset][txsize_sqr_map[tx_size]][tx_type];
#endif // CONFIG_ENTROPY_STATS
}
} else {
if (LGT_FROM_PRED_INTRA) {
if (is_lgt_allowed(mbmi->mode, tx_size) && !cm->reduced_tx_set_used)
++counts->intra_lgt[txsize_sqr_map[tx_size]][mbmi->mode]
[mbmi->use_lgt];
#if CONFIG_ENTROPY_STATS
if (!mbmi->use_lgt)
++counts->intra_ext_tx[eset][txsize_sqr_map[tx_size]][mbmi->mode]
[tx_type];
else
#endif // CONFIG_ENTROPY_STATS
mbmi->tx_type = DCT_DCT;
} else {
#if CONFIG_ENTROPY_STATS
++counts->intra_ext_tx[eset][txsize_sqr_map[tx_size]][mbmi->mode]
[tx_type];
#endif // CONFIG_ENTROPY_STATS
}
}
#endif // CONFIG_LGT_FROM_PRED
}
}
}
@@ -479,7 +479,7 @@ void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
TxfmParam txfm_param;
#if CONFIG_DIST_8X8 || CONFIG_LGT_FROM_PRED || CONFIG_MRC_TX
#if CONFIG_DIST_8X8 || CONFIG_MRC_TX
uint8_t *dst;
const int dst_stride = pd->dst.stride;
#if CONFIG_DIST_8X8
@@ -506,9 +506,9 @@ void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
qparam.iqmatrix = iqmatrix;
#endif // CONFIG_AOM_QM
#if CONFIG_DIST_8X8 || CONFIG_LGT_FROM_PRED || CONFIG_MRC_TX
#if CONFIG_DIST_8X8 || CONFIG_MRC_TX
dst = &pd->dst.buf[(blk_row * dst_stride + blk_col) << tx_size_wide_log2[0]];
#endif // CONFIG_DIST_8X8 || CONFIG_LGT_FROM_PRED ||
#endif // CONFIG_DIST_8X8 ||
// CONFIG_MRC_TX
#if CONFIG_DIST_8X8
@@ -545,7 +545,7 @@ void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
#if CONFIG_MRC_TX || CONFIG_LGT
txfm_param.is_inter = is_inter_block(mbmi);
#endif
#if CONFIG_MRC_TX || CONFIG_LGT_FROM_PRED
#if CONFIG_MRC_TX
txfm_param.dst = dst;
txfm_param.stride = dst_stride;
#if CONFIG_MRC_TX
@@ -554,11 +554,7 @@ void av1_xform_quant(const AV1_COMMON *cm, MACROBLOCK *x, int plane, int block,
txfm_param.mask = BLOCK_OFFSET(xd->mrc_mask, block);
#endif // SIGNAL_ANY_MRC_MASK
#endif // CONFIG_MRC_TX
#if CONFIG_LGT_FROM_PRED
txfm_param.mode = mbmi->mode;
txfm_param.use_lgt = mbmi->use_lgt;
#endif // CONFIG_LGT_FROM_PRED
#endif // CONFIG_MRC_TX || CONFIG_LGT_FROM_PRED
#endif // CONFIG_MRC_TX
txfm_param.bd = xd->bd;
const int is_hbd = get_bitdepth_data_path_index(xd);
@@ -628,15 +624,9 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
if (p->eobs[block] != 0)
{
#if CONFIG_LGT_FROM_PRED
PREDICTION_MODE mode = xd->mi[0]->mbmi.mode;
#endif // CONFIG_LGT_FROM_PRED
TX_TYPE tx_type =
av1_get_tx_type(pd->plane_type, xd, blk_row, blk_col, block, tx_size);
av1_inverse_transform_block(xd, dqcoeff,
#if CONFIG_LGT_FROM_PRED
mode,
#endif
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
@@ -901,9 +891,6 @@ void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
}
av1_inverse_transform_block(xd, dqcoeff,
#if CONFIG_LGT_FROM_PRED
xd->mi[0]->mbmi.mode,
#endif
#if CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
mrc_mask,
#endif // CONFIG_MRC_TX && SIGNAL_ANY_MRC_MASK
@@ -490,14 +490,6 @@ void av1_fwd_txfm(const int16_t *src_diff, tran_low_t *coeff, int diff_stride,
TxfmParam *txfm_param) {
assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
const TX_SIZE tx_size = txfm_param->tx_size;
#if CONFIG_LGT_FROM_PRED
if (txfm_param->use_lgt) {
// if use_lgt is 1, it will override tx_type
assert(is_lgt_allowed(txfm_param->mode, tx_size));
flgt2d_from_pred_c(src_diff, coeff, diff_stride, txfm_param);
return;
}
#endif // CONFIG_LGT_FROM_PRED
switch (tx_size) {
#if CONFIG_TX64X64
case TX_64X64:
@@ -244,22 +244,6 @@ void av1_fill_mode_rates(AV1_COMMON *const cm, MACROBLOCK *x,
#endif
}
#if CONFIG_LGT_FROM_PRED
if (LGT_FROM_PRED_INTRA) {
for (i = 0; i < LGT_SIZES; ++i) {
for (j = 0; j < INTRA_MODES; ++j) {
x->intra_lgt_cost[i][j][0] = av1_cost_bit(fc->intra_lgt_prob[i][j], 0);
x->intra_lgt_cost[i][j][1] = av1_cost_bit(fc->intra_lgt_prob[i][j], 1);
}
}
}
if (LGT_FROM_PRED_INTER) {
for (i = 0; i < LGT_SIZES; ++i) {
x->inter_lgt_cost[i][0] = av1_cost_bit(fc->inter_lgt_prob[i], 0);
x->inter_lgt_cost[i][1] = av1_cost_bit(fc->inter_lgt_prob[i], 1);
}
}
#endif // CONFIG_LGT_FROM_PRED
for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
int s;
for (s = 1; s < EXT_TX_SETS_INTER; ++s) {