Commit c28dbdf6 authored by Debargha Mukherjee, committed by Gerrit Code Review

Merge "Adds 1D transforms for ADST/FlipADST to make 16" into nextgenv2

parents d324c6b0 1b175593
@@ -103,20 +103,6 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 7,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 8,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 9,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 10,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 11,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 12,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 13,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 14,
VPX_BITS_8, 256),
make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_sse2, 15,
VPX_BITS_8, 256)));
#endif // !CONFIG_EXT_TX
#endif // HAVE_SSE2
@@ -102,20 +102,6 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 7,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 8,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 9,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 10,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 11,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 12,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 13,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 14,
VPX_BITS_8, 16),
make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 15,
VPX_BITS_8, 16)));
#endif // !CONFIG_EXT_TX
#endif // HAVE_SSE2
@@ -102,20 +102,6 @@ INSTANTIATE_TEST_CASE_P(
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 7,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 8,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 9,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 10,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 11,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 12,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 13,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 14,
VPX_BITS_8, 64),
make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 15,
VPX_BITS_8, 64)));
#endif // !CONFIG_EXT_TX
#endif // HAVE_SSE2
@@ -383,10 +383,10 @@ static INLINE int supertx_enabled(const MB_MODE_INFO *mbmi) {
#define USE_MSKTX_FOR_32X32 0
static const int num_ext_tx_set_inter[EXT_TX_SETS_INTER] = {
1, 19, 12, 2
1, 16, 12, 2
};
static const int num_ext_tx_set_intra[EXT_TX_SETS_INTRA] = {
1, 17, 10
1, 12, 10
};
#if EXT_TX_SIZES == 4
@@ -437,17 +437,17 @@ static const int use_inter_ext_tx_for_txsize[EXT_TX_SETS_INTER][TX_SIZES] = {
// Transform types used in each intra set
static const int ext_tx_used_intra[EXT_TX_SETS_INTRA][TX_TYPES] = {
{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, },
{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, },
{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0},
};
// Transform types used in each inter set
static const int ext_tx_used_inter[EXT_TX_SETS_INTER][TX_TYPES] = {
{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1},
{ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1},
{1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
};
static INLINE int get_ext_tx_types(TX_SIZE tx_size, BLOCK_SIZE bs,
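For readers following the tables above (a hedged illustration, not part of this change): each row of ext_tx_used_intra / ext_tx_used_inter is one transform set and each column one TX_TYPE, so after this change the allow masks have 16 columns to match the 16 transform types. Whether a given type is usable in a set reduces to an array lookup, roughly:

/* Hypothetical helper, for illustration only; the table names and
 * dimensions are taken from the diff above. */
static INLINE int sketch_ext_tx_allowed(int is_inter, int set, int tx_type) {
  return is_inter ? ext_tx_used_inter[set][tx_type]
                  : ext_tx_used_intra[set][tx_type];
}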
@@ -111,21 +111,17 @@ typedef enum {
FLIPADST_FLIPADST = 6,
ADST_FLIPADST = 7,
FLIPADST_ADST = 8,
DST_DCT = 9,
DCT_DST = 10,
DST_ADST = 11,
ADST_DST = 12,
DST_FLIPADST = 13,
FLIPADST_DST = 14,
DST_DST = 15,
IDTX = 16,
V_DCT = 17,
H_DCT = 18,
IDTX = 9,
V_DCT = 10,
H_DCT = 11,
V_ADST = 12,
H_ADST = 13,
V_FLIPADST = 14,
H_FLIPADST = 15,
#endif // CONFIG_EXT_TX
TX_TYPES,
} TX_TYPE;
#if CONFIG_EXT_TX
#define EXT_TX_SIZES 4 // number of sizes that use extended transforms
#define EXT_TX_SETS_INTER 4 // Sets of transform selections for INTER
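To make the renumbered entries concrete (an illustrative sketch, not code from this commit): the new 1-D types pair a transform along one axis with the identity along the other. V_ADST, for example, runs the ADST down each column and leaves the rows untouched, H_ADST does the reverse, and the V_/H_FLIPADST variants additionally mirror the input (see the maybe_flip_input() change further below). A minimal column-only forward pass, with the 1-D kernel passed in as a parameter, could look like this:

#include <stdint.h>

typedef int32_t tran_low_t;                      /* stand-in for the codec's type */
typedef void (*transform_1d)(const tran_low_t *in, tran_low_t *out);

/* Hypothetical helper: apply a 1-D kernel to the columns of a 4x4 block
 * and the identity to its rows, i.e. the shape of a V_* transform type. */
static void sketch_fht4x4_vertical(const int16_t *src, int stride,
                                   tran_low_t *out, transform_1d vkernel) {
  tran_low_t col_in[4], col_out[4];
  int r, c;
  for (c = 0; c < 4; ++c) {
    for (r = 0; r < 4; ++r) col_in[r] = src[r * stride + c];
    vkernel(col_in, col_out);                    /* e.g. a 4-point ADST */
    for (r = 0; r < 4; ++r) out[r * 4 + c] = col_out[r];
  }
  /* Horizontal pass is the identity, so rows are left as-is; the real
   * code also applies scaling so gains match the 2-D DCT path. */
}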
@@ -2882,13 +2882,10 @@ const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors},
{col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors},
{row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors},
{col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors},
{row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors},
{col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors},
}, { // TX_8X8
@@ -2902,13 +2899,10 @@ const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors},
{col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors},
{row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors},
{col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors},
{row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors},
{col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors},
}, { // TX_16X16
@@ -2930,22 +2924,12 @@ const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors},
{col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors},
{row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors},
{col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors},
{row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors},
{col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors},
{row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors},
{col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors},
}, { // TX_32X32
{default_scan_32x32, vp10_default_iscan_32x32,
default_scan_32x32_neighbors},
@@ -2965,26 +2949,14 @@ const scan_order vp10_intra_scan_orders[TX_SIZES][TX_TYPES] = {
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{h2_scan_32x32, vp10_h2_iscan_32x32,
h2_scan_32x32_neighbors},
{v2_scan_32x32, vp10_v2_iscan_32x32,
v2_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{default_scan_32x32, vp10_default_iscan_32x32,
default_scan_32x32_neighbors},
{h2_scan_32x32, vp10_h2_iscan_32x32,
h2_scan_32x32_neighbors},
{v2_scan_32x32, vp10_v2_iscan_32x32,
v2_scan_32x32_neighbors},
{mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors},
{mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors},
{mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors},
{mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors},
{mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors},
{mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors},
}
};
@@ -3000,13 +2972,10 @@ const scan_order vp10_inter_scan_orders[TX_SIZES][TX_TYPES] = {
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors},
{mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors},
{mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors},
{mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors},
{mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors},
{mrow_scan_4x4, vp10_mrow_iscan_4x4, mrow_scan_4x4_neighbors},
{mcol_scan_4x4, vp10_mcol_iscan_4x4, mcol_scan_4x4_neighbors},
}, { // TX_8X8
@@ -3020,13 +2989,10 @@ const scan_order vp10_inter_scan_orders[TX_SIZES][TX_TYPES] = {
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors},
{mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors},
{mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors},
{mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors},
{mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors},
{mrow_scan_8x8, vp10_mrow_iscan_8x8, mrow_scan_8x8_neighbors},
{mcol_scan_8x8, vp10_mcol_iscan_8x8, mcol_scan_8x8_neighbors},
}, { // TX_16X16
@@ -3050,22 +3016,12 @@ const scan_order vp10_inter_scan_orders[TX_SIZES][TX_TYPES] = {
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{default_scan_16x16, vp10_default_iscan_16x16,
default_scan_16x16_neighbors},
{mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors},
{mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors},
{mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors},
{mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors},
{mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors},
{mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors},
{mrow_scan_16x16, vp10_mrow_iscan_16x16, mrow_scan_16x16_neighbors},
{mcol_scan_16x16, vp10_mcol_iscan_16x16, mcol_scan_16x16_neighbors},
}, { // TX_32X32
{default_scan_32x32, vp10_default_iscan_32x32,
default_scan_32x32_neighbors},
@@ -3085,24 +3041,14 @@ const scan_order vp10_inter_scan_orders[TX_SIZES][TX_TYPES] = {
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{h2_scan_32x32, vp10_h2_iscan_32x32,
h2_scan_32x32_neighbors},
{v2_scan_32x32, vp10_v2_iscan_32x32,
v2_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{qtr_scan_32x32, vp10_qtr_iscan_32x32,
qtr_scan_32x32_neighbors},
{default_scan_32x32, vp10_default_iscan_32x32,
default_scan_32x32_neighbors},
{mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors},
{mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors},
{mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors},
{mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors},
{mrow_scan_32x32, vp10_mrow_iscan_32x32, mrow_scan_32x32_neighbors},
{mcol_scan_32x32, vp10_mcol_iscan_32x32, mcol_scan_32x32_neighbors},
}
};
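A hedged aside (not code from this commit): both scan-order tables are indexed first by transform size and then by transform type, so removing the DST rows shifts the remaining entries, and the trailing row/col and mrow/mcol scans now correspond to the new 1-D transform types. Selecting a scan order is then a plain table lookup, roughly:

/* Hypothetical wrapper, for illustration only; table and type names are
 * taken from the diff above. */
static const scan_order *sketch_get_inter_scan(TX_SIZE tx_size,
                                               TX_TYPE tx_type) {
  return &vp10_inter_scan_orders[tx_size][tx_type];
}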
@@ -36,219 +36,6 @@ static INLINE void range_check(const tran_low_t *input, const int size,
#endif
}
#if CONFIG_EXT_TX
void fdst4(const tran_low_t *input, tran_low_t *output) {
tran_high_t step[4];
tran_high_t temp1, temp2;
step[0] = input[0] - input[3];
step[1] = -input[1] + input[2];
step[2] = -input[1] - input[2];
step[3] = input[0] + input[3];
temp1 = (step[0] + step[1]) * cospi_16_64;
temp2 = (step[0] - step[1]) * cospi_16_64;
output[3] = fdct_round_shift(temp1);
output[1] = fdct_round_shift(temp2);
temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;
temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;
output[2] = fdct_round_shift(temp1);
output[0] = fdct_round_shift(temp2);
}
void fdst8(const tran_low_t *input, tran_low_t *output) {
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; // canbe16
tran_high_t t0, t1, t2, t3; // needs32
tran_high_t x0, x1, x2, x3; // canbe16
// stage 1
s0 = input[0] - input[7];
s1 = -input[1] + input[6];
s2 = input[2] - input[5];
s3 = -input[3] + input[4];
s4 = -input[3] - input[4];
s5 = input[2] + input[5];
s6 = -input[1] - input[6];
s7 = input[0] + input[7];
x0 = s0 + s3;
x1 = s1 + s2;
x2 = s1 - s2;
x3 = s0 - s3;
t0 = (x0 + x1) * cospi_16_64;
t1 = (x0 - x1) * cospi_16_64;
t2 = x2 * cospi_24_64 + x3 * cospi_8_64;
t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;
output[7] = fdct_round_shift(t0);
output[5] = fdct_round_shift(t2);
output[3] = fdct_round_shift(t1);
output[1] = fdct_round_shift(t3);
// Stage 2
t0 = (s6 - s5) * cospi_16_64;
t1 = (s6 + s5) * cospi_16_64;
t2 = fdct_round_shift(t0);
t3 = fdct_round_shift(t1);
// Stage 3
x0 = s4 + t2;
x1 = s4 - t2;
x2 = s7 - t3;
x3 = s7 + t3;
// Stage 4
t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
output[6] = fdct_round_shift(t0);
output[4] = fdct_round_shift(t2);
output[2] = fdct_round_shift(t1);
output[0] = fdct_round_shift(t3);
}
void fdst16(const tran_low_t *input, tran_low_t *output) {
tran_high_t step1[8]; // canbe16
tran_high_t step2[8]; // canbe16
tran_high_t step3[8]; // canbe16
tran_high_t in[8]; // canbe16
tran_high_t temp1, temp2; // needs32
// step 1
in[0] = input[0] - input[15];
in[1] = -input[1] + input[14];
in[2] = input[2] - input[13];
in[3] = -input[3] + input[12];
in[4] = input[4] - input[11];
in[5] = -input[5] + input[10];
in[6] = input[6] - input[ 9];
in[7] = -input[7] + input[ 8];
step1[0] = -input[7] - input[ 8];
step1[1] = input[6] + input[ 9];
step1[2] = -input[5] - input[10];
step1[3] = input[4] + input[11];
step1[4] = -input[3] - input[12];
step1[5] = input[2] + input[13];
step1[6] = -input[1] - input[14];
step1[7] = input[0] + input[15];
// fdct8(step, step);
{
tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; // canbe16
tran_high_t t0, t1, t2, t3; // needs32
tran_high_t x0, x1, x2, x3; // canbe16
// stage 1
s0 = in[0] + in[7];
s1 = in[1] + in[6];
s2 = in[2] + in[5];
s3 = in[3] + in[4];
s4 = in[3] - in[4];
s5 = in[2] - in[5];
s6 = in[1] - in[6];
s7 = in[0] - in[7];
// fdct4(step, step);
x0 = s0 + s3;
x1 = s1 + s2;
x2 = s1 - s2;
x3 = s0 - s3;
t0 = (x0 + x1) * cospi_16_64;
t1 = (x0 - x1) * cospi_16_64;
t2 = x3 * cospi_8_64 + x2 * cospi_24_64;
t3 = x3 * cospi_24_64 - x2 * cospi_8_64;
output[15] = fdct_round_shift(t0);
output[11] = fdct_round_shift(t2);
output[7] = fdct_round_shift(t1);
output[3] = fdct_round_shift(t3);
// Stage 2
t0 = (s6 - s5) * cospi_16_64;
t1 = (s6 + s5) * cospi_16_64;
t2 = fdct_round_shift(t0);
t3 = fdct_round_shift(t1);
// Stage 3
x0 = s4 + t2;
x1 = s4 - t2;
x2 = s7 - t3;
x3 = s7 + t3;
// Stage 4
t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
t1 = x1 * cospi_12_64 + x2 * cospi_20_64;
t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;
t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
output[13] = fdct_round_shift(t0);
output[9] = fdct_round_shift(t2);
output[5] = fdct_round_shift(t1);
output[1] = fdct_round_shift(t3);
}
// step 2
temp1 = (step1[5] - step1[2]) * cospi_16_64;
temp2 = (step1[4] - step1[3]) * cospi_16_64;
step2[2] = fdct_round_shift(temp1);
step2[3] = fdct_round_shift(temp2);
temp1 = (step1[4] + step1[3]) * cospi_16_64;
temp2 = (step1[5] + step1[2]) * cospi_16_64;
step2[4] = fdct_round_shift(temp1);
step2[5] = fdct_round_shift(temp2);
// step 3
step3[0] = step1[0] + step2[3];
step3[1] = step1[1] + step2[2];
step3[2] = step1[1] - step2[2];
step3[3] = step1[0] - step2[3];
step3[4] = step1[7] - step2[4];
step3[5] = step1[6] - step2[5];
step3[6] = step1[6] + step2[5];
step3[7] = step1[7] + step2[4];
// step 4
temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;
temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;
step2[1] = fdct_round_shift(temp1);
step2[2] = fdct_round_shift(temp2);
temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;
temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;
step2[5] = fdct_round_shift(temp1);
step2[6] = fdct_round_shift(temp2);
// step 5
step1[0] = step3[0] + step2[1];
step1[1] = step3[0] - step2[1];
step1[2] = step3[3] + step2[2];
step1[3] = step3[3] - step2[2];
step1[4] = step3[4] - step2[5];
step1[5] = step3[4] + step2[5];
step1[6] = step3[7] - step2[6];
step1[7] = step3[7] + step2[6];
// step 6
temp1 = step1[0] * cospi_30_64 + step1[7] * cospi_2_64;
temp2 = step1[1] * cospi_14_64 + step1[6] * cospi_18_64;
output[14] = fdct_round_shift(temp1);
output[6] = fdct_round_shift(temp2);
temp1 = step1[2] * cospi_22_64 + step1[5] * cospi_10_64;
temp2 = step1[3] * cospi_6_64 + step1[4] * cospi_26_64;
output[10] = fdct_round_shift(temp1);
output[2] = fdct_round_shift(temp2);
temp1 = step1[3] * -cospi_26_64 + step1[4] * cospi_6_64;
temp2 = step1[2] * -cospi_10_64 + step1[5] * cospi_22_64;
output[12] = fdct_round_shift(temp1);
output[4] = fdct_round_shift(temp2);
temp1 = step1[1] * -cospi_18_64 + step1[6] * cospi_14_64;
temp2 = step1[0] * -cospi_2_64 + step1[7] * cospi_30_64;
output[8] = fdct_round_shift(temp1);
output[0] = fdct_round_shift(temp2);
}
#endif // CONFIG_EXT_TX
static void fdct4(const tran_low_t *input, tran_low_t *output) {
tran_high_t temp;
tran_low_t step[4];
@@ -1236,22 +1023,6 @@ static void fidtx32(const tran_low_t *input, tran_low_t *output) {
output[i] = input[i] * 4;
}
// For use in lieu of DST
static void fhalfcenter32(const tran_low_t *input, tran_low_t *output) {
int i;
tran_low_t inputhalf[16];
for (i = 0; i < 8; ++i) {
output[16 + i] = input[i] * 4;
output[24 + i] = input[24 + i] * 4;
}
// Multiply input by sqrt(2)
for (i = 0; i < 16; ++i) {
inputhalf[i] = (tran_low_t)fdct_round_shift(input[i + 8] * Sqrt2);
}
fdct16(inputhalf, output);
// Note overall scaling factor is 4 times orthogonal
}
// For use in lieu of ADST
static void fhalfright32(const tran_low_t *input, tran_low_t *output) {
int i;
@@ -1334,25 +1105,22 @@ static void maybe_flip_input(const int16_t **src, int *src_stride, int l,
case ADST_DCT:
case DCT_ADST:
case ADST_ADST:
case DST_DST:
case DCT_DST:
case DST_DCT:
case DST_ADST:
case ADST_DST:
case IDTX:
case H_DCT:
case V_DCT:
case H_DCT:
case V_ADST:
case H_ADST:
break;
case FLIPADST_DCT:
case FLIPADST_ADST:
case FLIPADST_DST:
case V_FLIPADST:
copy_flipud(*src, *src_stride, l, buff, l);
*src = buff;
*src_stride = l;
break;
case DCT_FLIPADST:
case ADST_FLIPADST:
case DST_FLIPADST:
case H_FLIPADST:
copy_fliplr(*src, *src_stride, l, buff, l);
*src = buff;
*src_stride = l;
@@ -1370,98 +1138,86 @@ static void maybe_flip_input(const int16_t **src, int *src_stride, int l,
#endif // CONFIG_EXT_TX
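For orientation (a sketch, not code from this commit): as the switch above shows, the FlipADST cases are handled by mirroring the source block before the forward transform, up-down for FLIPADST_DCT / FLIPADST_ADST / V_FLIPADST and left-right for DCT_FLIPADST / ADST_FLIPADST / H_FLIPADST. An up-down flip consistent with the copy_flipud() call sites above could look like the following; the helper actually in the tree may differ in detail.

#include <stdint.h>

/* Hedged sketch of an up-down mirror: row i of the l x l source block
 * becomes row l-1-i of the destination. Signature inferred from the
 * copy_flipud(*src, *src_stride, l, buff, l) calls above. */
static void sketch_copy_flipud(const int16_t *src, int src_stride, int l,
                               int16_t *dest, int dest_stride) {
  int i, j;
  for (i = 0; i < l; ++i)
    for (j = 0; j < l; ++j)
      dest[(l - 1 - i) * dest_stride + j] = src[i * src_stride + j];
}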
static const transform_2d FHT_4[] = {
{ fdct4, fdct4 }, // DCT_DCT = 0,
{ fadst4, fdct4 }, // ADST_DCT = 1,
{ fdct4, fadst4 }, // DCT_ADST = 2,
{ fadst4, fadst4 }, // ADST_ADST = 3,