Commit fcdb1b45 authored by Sebastien Alaiwan

idct.c: drop unreachable code (non-scaled transforms)

Change-Id: I2890066b64b4fd8549a0c9a7741a7ec3274bdc3c
parent eb8e455e
......@@ -117,24 +117,6 @@ static void ihalfright64_c(const tran_low_t *input, tran_low_t *output) {
}
#endif // CONFIG_TX64X64 && (!CONFIG_DAALA_TX32 || !CONFIG_DAALA_TX64)
// Inverse identity transform and add.
#if !(CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16 && \
CONFIG_DAALA_TX32 && (!CONFIG_TX64X64 || CONFIG_DAALA_TX64))
// Inverse identity transform: add the (shifted) coefficients straight onto
// the destination pixels. Only acts when tx_type == IDTX.
static void inv_idtx_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                           int bsx, int bsy, TX_TYPE tx_type) {
  if (tx_type != IDTX) return;
  // Larger blocks carry more accumulated scaling, so shift less:
  // 3 for <=256 pels, 2 for <=1024, 1 beyond that.
  const int pels = bsx * bsy;
  const int shift = 3 - ((pels > 256) + (pels > 1024));
  for (int row = 0; row < bsy; ++row, dest += stride, input += bsx) {
    for (int col = 0; col < bsx; ++col)
      dest[col] = clip_pixel_add(dest[col], input[col] >> shift);
  }
}
#endif
#define FLIPUD_PTR(dest, stride, size) \
do { \
(dest) = (dest) + ((size)-1) * (stride); \
......@@ -1802,351 +1784,6 @@ void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
aom_iwht4x4_1_add(input, dest, stride);
}
#if !CONFIG_DAALA_TX
#if !CONFIG_DAALA_TX8
static void idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
                        const TxfmParam *txfm_param) {
  // Pick the cheapest inverse-DCT kernel based on the end-of-block index:
  // a lone DC coefficient, a sparse block, or the full 8x8 transform.
  // If dc is 1, input[0] already holds the reconstructed value (no
  // dequantization needed); dc == 1 also implies eobs >= 1.
  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
  // Combine that with code here.
  const int eob = txfm_param->eob;
  const int16_t sparse_thresh = 12;
  if (eob == 1) {
    // DC only DCT coefficient
    aom_idct8x8_1_add(input, dest, stride);
  } else if (eob <= sparse_thresh) {
    aom_idct8x8_12_add(input, dest, stride);
  } else {
    aom_idct8x8_64_add(input, dest, stride);
  }
}
#endif
#if !CONFIG_DAALA_TX16
static void idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
  // Dispatch on eob so blocks with few non-zero coefficients use the
  // cheaper partial-transform kernels.
  const int eob = txfm_param->eob;
  const int16_t quarter_thresh = 10;
  const int16_t half_thresh = 38;
  if (eob == 1) {
    // DC only DCT coefficient.
    aom_idct16x16_1_add(input, dest, stride);
  } else if (eob <= quarter_thresh) {
    aom_idct16x16_10_add(input, dest, stride);
  } else if (eob <= half_thresh) {
    aom_idct16x16_38_add(input, dest, stride);
  } else {
    aom_idct16x16_256_add(input, dest, stride);
  }
}
#endif
#if !CONFIG_DAALA_TX32
static void idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
  // Choose a partial inverse DCT sized to where the non-zero coefficients
  // can lie, as bounded by the end-of-block index.
  const int eob = txfm_param->eob;
  const int16_t quarter_thresh = 34;
  const int16_t half_thresh = 135;
  if (eob == 1) {
    aom_idct32x32_1_add(input, dest, stride);
  } else if (eob <= quarter_thresh) {
    // non-zero coeff only in upper-left 8x8
    aom_idct32x32_34_add(input, dest, stride);
  } else if (eob <= half_thresh) {
    // non-zero coeff only in upper-left 16x16
    aom_idct32x32_135_add(input, dest, stride);
  } else {
    aom_idct32x32_1024_add(input, dest, stride);
  }
}
#endif
#if CONFIG_TX64X64 && !CONFIG_DAALA_TX64
static void idct64x64_add(const tran_low_t *input, uint8_t *dest, int stride,
                          const TxfmParam *txfm_param) {
  // Forward to the full 64x64 hybrid inverse transform; unlike the smaller
  // sizes there is no eob-based shortcut here.
  // Bug fix: the original had `(void)txfm_param;` (an unused-parameter
  // suppression) immediately before a line that uses txfm_param — the
  // contradictory dead statement is removed.
  av1_iht64x64_4096_add(input, dest, stride, txfm_param);
}
#endif // CONFIG_TX64X64 && !CONFIG_DAALA_TX64
static void inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
                             const TxfmParam *txfm_param) {
  const TX_TYPE tx_type = txfm_param->tx_type;
  // Lossless coding always uses the 4x4 Walsh-Hadamard transform.
  if (txfm_param->lossless) {
    assert(tx_type == DCT_DCT);
    av1_iwht4x4_add(input, dest, stride, txfm_param);
    return;
  }
#if CONFIG_DAALA_TX4
  (void)tx_type;
  av1_iht4x4_16_add_c(input, dest, stride, txfm_param);
#else
  switch (tx_type) {
    case DCT_DCT: av1_idct4x4_add(input, dest, stride, txfm_param); break;
    // All ADST/FLIPADST combinations share the same hybrid kernel.
    case ADST_DCT:
    case DCT_ADST:
    case ADST_ADST:
    case FLIPADST_DCT:
    case DCT_FLIPADST:
    case FLIPADST_FLIPADST:
    case ADST_FLIPADST:
    case FLIPADST_ADST:
      av1_iht4x4_16_add(input, dest, stride, txfm_param);
      break;
    case V_DCT:
    case H_DCT:
    case V_ADST:
    case H_ADST:
    case V_FLIPADST:
    case H_FLIPADST:
      // Use C version since DST only exists in C code
      av1_iht4x4_16_add_c(input, dest, stride, txfm_param);
      break;
    case IDTX: inv_idtx_add_c(input, dest, stride, 4, 4, tx_type); break;
    default: assert(0); break;
  }
#endif
}
// Inverse transform + add for a 4x8 block. When both dimensions use Daala
// transforms, the plain C kernel is forced (presumably the optimized
// dispatch lacks a Daala path — confirm).
static void inv_txfm_add_4x8(const tran_low_t *input, uint8_t *dest, int stride,
                             const TxfmParam *txfm_param) {
#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
  av1_iht4x8_32_add_c(input, dest, stride, txfm_param);
#else
  av1_iht4x8_32_add(input, dest, stride, txfm_param);
#endif
}
// Inverse transform + add for an 8x4 block; mirrors inv_txfm_add_4x8,
// forcing the C kernel when the Daala transform path is configured.
static void inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest, int stride,
                             const TxfmParam *txfm_param) {
#if (CONFIG_DAALA_TX4 && CONFIG_DAALA_TX8)
  av1_iht8x4_32_add_c(input, dest, stride, txfm_param);
#else
  av1_iht8x4_32_add(input, dest, stride, txfm_param);
#endif
}
// These will be used by the masked-tx experiment in the future.
// Thin wrapper: 4x16 inverse hybrid transform + add.
static void inv_txfm_add_4x16(const tran_low_t *input, uint8_t *dest,
                              int stride, const TxfmParam *txfm_param) {
  av1_iht4x16_64_add(input, dest, stride, txfm_param);
}
// Thin wrapper: 16x4 inverse hybrid transform + add.
static void inv_txfm_add_16x4(const tran_low_t *input, uint8_t *dest,
                              int stride, const TxfmParam *txfm_param) {
  av1_iht16x4_64_add(input, dest, stride, txfm_param);
}
// Thin wrapper: 8x32 inverse hybrid transform + add.
static void inv_txfm_add_8x32(const tran_low_t *input, uint8_t *dest,
                              int stride, const TxfmParam *txfm_param) {
  av1_iht8x32_256_add(input, dest, stride, txfm_param);
}
// Thin wrapper: 32x8 inverse hybrid transform + add.
static void inv_txfm_add_32x8(const tran_low_t *input, uint8_t *dest,
                              int stride, const TxfmParam *txfm_param) {
  av1_iht32x8_256_add(input, dest, stride, txfm_param);
}
// Inverse transform + add for an 8x16 block; the Daala configuration
// routes to the C kernel directly.
static void inv_txfm_add_8x16(const tran_low_t *input, uint8_t *dest,
                              int stride, const TxfmParam *txfm_param) {
#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
  av1_iht8x16_128_add_c(input, dest, stride, txfm_param);
#else
  av1_iht8x16_128_add(input, dest, stride, txfm_param);
#endif
}
// Inverse transform + add for a 16x8 block; mirrors inv_txfm_add_8x16.
static void inv_txfm_add_16x8(const tran_low_t *input, uint8_t *dest,
                              int stride, const TxfmParam *txfm_param) {
#if (CONFIG_DAALA_TX8 && CONFIG_DAALA_TX16)
  av1_iht16x8_128_add_c(input, dest, stride, txfm_param);
#else
  av1_iht16x8_128_add(input, dest, stride, txfm_param);
#endif
}
// Inverse transform + add for a 16x32 block; Daala configuration uses the
// C kernel directly.
static void inv_txfm_add_16x32(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX16 && CONFIG_DAALA_TX32
  av1_iht16x32_512_add_c(input, dest, stride, txfm_param);
#else
  av1_iht16x32_512_add(input, dest, stride, txfm_param);
#endif
}
// Inverse transform + add for a 32x16 block; mirrors inv_txfm_add_16x32.
static void inv_txfm_add_32x16(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX16 && CONFIG_DAALA_TX32
  av1_iht32x16_512_add_c(input, dest, stride, txfm_param);
#else
  av1_iht32x16_512_add(input, dest, stride, txfm_param);
#endif
}
#if CONFIG_TX64X64
// Inverse transform + add for a 32x64 block (64-point transforms enabled);
// Daala configuration uses the C kernel directly.
static void inv_txfm_add_32x64(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX64 && CONFIG_DAALA_TX32
  av1_iht32x64_2048_add_c(input, dest, stride, txfm_param);
#else
  av1_iht32x64_2048_add(input, dest, stride, txfm_param);
#endif
}
// Inverse transform + add for a 64x32 block; mirrors inv_txfm_add_32x64.
static void inv_txfm_add_64x32(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX64 && CONFIG_DAALA_TX32
  av1_iht64x32_2048_add_c(input, dest, stride, txfm_param);
#else
  av1_iht64x32_2048_add(input, dest, stride, txfm_param);
#endif
}
// Thin wrapper: 16x64 inverse hybrid transform + add.
static void inv_txfm_add_16x64(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
  av1_iht16x64_1024_add(input, dest, stride, txfm_param);
}
// Thin wrapper: 64x16 inverse hybrid transform + add.
static void inv_txfm_add_64x16(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
  av1_iht64x16_1024_add(input, dest, stride, txfm_param);
}
#endif // CONFIG_TX64X64
static void inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
                             const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX8
  av1_iht8x8_64_add_c(input, dest, stride, txfm_param);
#else
  const TX_TYPE tx_type = txfm_param->tx_type;
  switch (tx_type) {
    // DCT_DCT has an eob-aware fast path.
    case DCT_DCT: idct8x8_add(input, dest, stride, txfm_param); break;
    // All ADST/FLIPADST combinations share the same hybrid kernel.
    case ADST_DCT:
    case DCT_ADST:
    case ADST_ADST:
    case FLIPADST_DCT:
    case DCT_FLIPADST:
    case FLIPADST_FLIPADST:
    case ADST_FLIPADST:
    case FLIPADST_ADST:
      av1_iht8x8_64_add(input, dest, stride, txfm_param);
      break;
    case V_DCT:
    case H_DCT:
    case V_ADST:
    case H_ADST:
    case V_FLIPADST:
    case H_FLIPADST:
      // Use C version since DST only exists in C code
      av1_iht8x8_64_add_c(input, dest, stride, txfm_param);
      break;
    case IDTX: inv_idtx_add_c(input, dest, stride, 8, 8, tx_type); break;
    default: assert(0); break;
  }
#endif
}
static void inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX16
  av1_iht16x16_256_add_c(input, dest, stride, txfm_param);
#else
  const TX_TYPE tx_type = txfm_param->tx_type;
  switch (tx_type) {
    // DCT_DCT has an eob-aware fast path.
    case DCT_DCT: idct16x16_add(input, dest, stride, txfm_param); break;
    // Every other 2-D transform combination shares the hybrid kernel.
    case ADST_DCT:
    case DCT_ADST:
    case ADST_ADST:
    case FLIPADST_DCT:
    case DCT_FLIPADST:
    case FLIPADST_FLIPADST:
    case ADST_FLIPADST:
    case FLIPADST_ADST:
    case V_DCT:
    case H_DCT:
    case V_ADST:
    case H_ADST:
    case V_FLIPADST:
    case H_FLIPADST:
      av1_iht16x16_256_add(input, dest, stride, txfm_param);
      break;
    case IDTX: inv_idtx_add_c(input, dest, stride, 16, 16, tx_type); break;
    default: assert(0); break;
  }
#endif
}
// Inverse transform + add for a 32x32 block.
static void inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX32
  av1_iht32x32_1024_add_c(input, dest, stride, txfm_param);
#else
  const TX_TYPE tx_type = txfm_param->tx_type;
  switch (tx_type) {
    // DCT_DCT gets an eob-aware fast path.
    case DCT_DCT: idct32x32_add(input, dest, stride, txfm_param); break;
    case ADST_DCT:
    case DCT_ADST:
    case ADST_ADST:
    case FLIPADST_DCT:
    case DCT_FLIPADST:
    case FLIPADST_FLIPADST:
    case ADST_FLIPADST:
    case FLIPADST_ADST:
    case V_DCT:
    case H_DCT:
    case V_ADST:
    case H_ADST:
    case V_FLIPADST:
    case H_FLIPADST:
      // All remaining 2-D transforms share the C kernel (note the _c
      // suffix — no optimized dispatch on this path).
      av1_iht32x32_1024_add_c(input, dest, stride, txfm_param);
      break;
    case IDTX: inv_idtx_add_c(input, dest, stride, 32, 32, tx_type); break;
    default: assert(0); break;
  }
#endif
}
#if CONFIG_TX64X64
// Inverse transform + add for a 64x64 block. Only DCT_DCT is expected at
// this size (see the assert); the other case labels are unreachable when
// asserts are enabled but still route to the C kernel in release builds.
static void inv_txfm_add_64x64(const tran_low_t *input, uint8_t *dest,
                               int stride, const TxfmParam *txfm_param) {
#if CONFIG_DAALA_TX64
  av1_iht64x64_4096_add_c(input, dest, stride, txfm_param);
#else
  const TX_TYPE tx_type = txfm_param->tx_type;
  assert(tx_type == DCT_DCT);
  switch (tx_type) {
    case DCT_DCT: idct64x64_add(input, dest, stride, txfm_param); break;
    case ADST_DCT:
    case DCT_ADST:
    case ADST_ADST:
    case FLIPADST_DCT:
    case DCT_FLIPADST:
    case FLIPADST_FLIPADST:
    case ADST_FLIPADST:
    case FLIPADST_ADST:
    case V_DCT:
    case H_DCT:
    case V_ADST:
    case H_ADST:
    case V_FLIPADST:
    case H_FLIPADST:
      av1_iht64x64_4096_add_c(input, dest, stride, txfm_param);
      break;
    case IDTX: inv_idtx_add_c(input, dest, stride, 64, 64, tx_type); break;
    default: assert(0); break;
  }
#endif
}
#endif // CONFIG_TX64X64
#endif // !CONFIG_DAALA_TX
void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
int eob, int bd) {
if (eob > 1)
......@@ -2458,46 +2095,6 @@ static void highbd_inv_txfm_add_64x64(const tran_low_t *input, uint8_t *dest,
#endif // CONFIG_TX64X64
#endif // !CONFIG_DAALA_TX
// Top-level inverse-transform-and-add dispatch: route to the handler for
// the block's transform size.
void av1_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
                      TxfmParam *txfm_param) {
  assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]);
#if CONFIG_DAALA_TX
  assert(!txfm_param->is_hbd);
  daala_inv_txfm_add(input, dest, stride, txfm_param);
#else
  const TX_SIZE tx_size = txfm_param->tx_size;
  switch (tx_size) {
    case TX_4X4:
      // this is like av1_short_idct4x4 but has a special case around eob<=1
      // which is significant (not just an optimization) for the lossless
      // case.
      inv_txfm_add_4x4(input, dest, stride, txfm_param);
      break;
    case TX_8X8: inv_txfm_add_8x8(input, dest, stride, txfm_param); break;
    case TX_16X16: inv_txfm_add_16x16(input, dest, stride, txfm_param); break;
    case TX_32X32: inv_txfm_add_32x32(input, dest, stride, txfm_param); break;
    case TX_4X8: inv_txfm_add_4x8(input, dest, stride, txfm_param); break;
    case TX_8X4: inv_txfm_add_8x4(input, dest, stride, txfm_param); break;
    case TX_8X16: inv_txfm_add_8x16(input, dest, stride, txfm_param); break;
    case TX_16X8: inv_txfm_add_16x8(input, dest, stride, txfm_param); break;
    case TX_16X32: inv_txfm_add_16x32(input, dest, stride, txfm_param); break;
    case TX_32X16: inv_txfm_add_32x16(input, dest, stride, txfm_param); break;
    case TX_4X16: inv_txfm_add_4x16(input, dest, stride, txfm_param); break;
    case TX_16X4: inv_txfm_add_16x4(input, dest, stride, txfm_param); break;
    case TX_8X32: inv_txfm_add_8x32(input, dest, stride, txfm_param); break;
    case TX_32X8: inv_txfm_add_32x8(input, dest, stride, txfm_param); break;
#if CONFIG_TX64X64
    case TX_64X64: inv_txfm_add_64x64(input, dest, stride, txfm_param); break;
    case TX_32X64: inv_txfm_add_32x64(input, dest, stride, txfm_param); break;
    case TX_64X32: inv_txfm_add_64x32(input, dest, stride, txfm_param); break;
    case TX_16X64: inv_txfm_add_16x64(input, dest, stride, txfm_param); break;
    case TX_64X16: inv_txfm_add_64x16(input, dest, stride, txfm_param); break;
#endif  // CONFIG_TX64X64
    default: assert(0 && "Invalid transform size"); break;
  }
#endif
}
void av1_inv_txfm_add_txmg(const tran_low_t *dqcoeff, uint8_t *dst, int stride,
TxfmParam *txfm_param) {
const TX_SIZE tx_size = txfm_param->tx_size;
......
......@@ -39,8 +39,6 @@ void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
const TxfmParam *txfm_param);
void av1_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
TxfmParam *txfm_param);
void av1_inverse_transform_block(const MACROBLOCKD *xd,
const tran_low_t *dqcoeff, int plane,
TX_TYPE tx_type, TX_SIZE tx_size, uint8_t *dst,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment