Commit 400bf651, authored by Hui Su and committed by Sebastien Alaiwan

Remove dpcm-intra experiment

Coding gain becomes tiny on top of other experiments.

Change-Id: Ia89b1c2a2653f3833dff8ac8bb612eaa3ba18446
parent f16c3c89
......@@ -420,16 +420,6 @@ if (aom_config("CONFIG_HIGHBITDEPTH") ne "yes") {
add_proto qw/void av1_fwd_idtx/, "const int16_t *src_diff, tran_low_t *coeff, int stride, int bs, int tx_type";
# Declare the DPCM intra forward-transform prototypes (4/8/16/32-point).
# The high-bitdepth variants take an extra `dir` argument and are only
# declared when CONFIG_HIGHBITDEPTH is enabled.
if (aom_config("CONFIG_DPCM_INTRA") eq "yes") {
@sizes = (4, 8, 16, 32);
foreach $size (@sizes) {
if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
add_proto "void", "av1_hbd_dpcm_ft$size", "const int16_t *input, int stride, TX_TYPE_1D tx_type, tran_low_t *output, int dir";
}
add_proto "void", "av1_dpcm_ft$size", "const int16_t *input, int stride, TX_TYPE_1D tx_type, tran_low_t *output";
}
}
#fwd txfm
add_proto qw/void av1_fwd_txfm2d_4x8/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
add_proto qw/void av1_fwd_txfm2d_8x4/, "const int16_t *input, int32_t *output, int stride, int tx_type, int bd";
......
......@@ -1235,7 +1235,7 @@ static INLINE int av1_raster_order_to_block_index(TX_SIZE tx_size,
return (tx_size == TX_4X4) ? raster_order : (raster_order > 0) ? 2 : 0;
}
#if CONFIG_DPCM_INTRA || CONFIG_LGT
#if CONFIG_LGT
static INLINE PREDICTION_MODE get_prediction_mode(const MODE_INFO *mi,
int plane, TX_SIZE tx_size,
int block_idx) {
......@@ -1246,7 +1246,7 @@ static INLINE PREDICTION_MODE get_prediction_mode(const MODE_INFO *mi,
return (plane == PLANE_TYPE_Y) ? get_y_mode(mi, block_raster_idx)
: get_uv_mode(mbmi->uv_mode);
}
#endif
#endif // CONFIG_LGT
static INLINE TX_TYPE get_default_tx_type(PLANE_TYPE plane_type,
const MACROBLOCKD *xd, int block_idx,
......
......@@ -2429,193 +2429,3 @@ void av1_highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
default: assert(0 && "Invalid transform size"); break;
}
}
#if CONFIG_DPCM_INTRA
// Inverse 4-point 1-D transform for DPCM intra: inverse transforms one line
// of 4 coefficients and adds the residual to the destination pixels, one
// pixel every `stride` entries.
void av1_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
                               TX_TYPE_1D tx_type, uint8_t *dest) {
  static const transform_1d IHT[] = { aom_idct4_c, aom_iadst4_c, aom_iadst4_c,
                                      iidtx4_c };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[4];
  IHT[tx_type](input, buf);
  for (int i = 0; i < 4; ++i) {
    // Extra sqrt(2) scaling mirrors the 4*sqrt(2) pre-scale applied by the
    // matching forward transform av1_dpcm_ft4_c.
    const tran_low_t v = (tran_low_t)dct_const_round_shift(buf[i] * Sqrt2);
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(v, 4));
  }
}
// Inverse 8-point 1-D transform for DPCM intra: inverse transforms one line
// of 8 coefficients and adds the residual to the destination pixels. Unlike
// the 4- and 16-point variants, no extra sqrt(2) scaling is needed here.
void av1_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
                               TX_TYPE_1D tx_type, uint8_t *dest) {
  static const transform_1d IHT[] = { aom_idct8_c, aom_iadst8_c, aom_iadst8_c,
                                      iidtx8_c };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[8];
  IHT[tx_type](input, buf);
  for (int i = 0; i < 8; ++i) {
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(buf[i], 4));
  }
}
// Inverse 16-point 1-D transform for DPCM intra: inverse transforms one line
// of 16 coefficients and adds the residual to the destination pixels.
void av1_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
                                TX_TYPE_1D tx_type, uint8_t *dest) {
  static const transform_1d IHT[] = { aom_idct16_c, aom_iadst16_c,
                                      aom_iadst16_c, iidtx16_c };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[16];
  IHT[tx_type](input, buf);
  for (int i = 0; i < 16; ++i) {
    // Extra sqrt(2) scaling mirrors the 2*sqrt(2) pre-scale applied by the
    // matching forward transform av1_dpcm_ft16_c; rounding shift is 5 for
    // this size (vs. 4 for the others).
    const tran_low_t v = (tran_low_t)dct_const_round_shift(buf[i] * Sqrt2);
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(v, 5));
  }
}
// Inverse 32-point 1-D transform for DPCM intra: inverse transforms one line
// of 32 coefficients and adds the residual to the destination pixels. The
// ADST slots use the half-right transform, as elsewhere for 32-point.
void av1_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
                                TX_TYPE_1D tx_type, uint8_t *dest) {
  static const transform_1d IHT[] = { aom_idct32_c, ihalfright32_c,
                                      ihalfright32_c, iidtx32_c };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[32];
  IHT[tx_type](input, buf);
  for (int i = 0; i < 32; ++i) {
    dest[i * stride] =
        clip_pixel_add(dest[i * stride], ROUND_POWER_OF_TWO(buf[i], 4));
  }
}
// Returns the low-bitdepth DPCM inverse-transform-and-add function for the
// given 1-D transform length. Only lengths 4/8/16/32 are supported; any
// other value asserts and returns NULL.
dpcm_inv_txfm_add_func av1_get_dpcm_inv_txfm_add_func(int tx_length) {
  if (tx_length == 4) return av1_dpcm_inv_txfm_add_4_c;
  if (tx_length == 8) return av1_dpcm_inv_txfm_add_8_c;
  if (tx_length == 16) return av1_dpcm_inv_txfm_add_16_c;
  if (tx_length == 32) return av1_dpcm_inv_txfm_add_32_c;
  // TODO(huisu): add support for TX_64X64.
  assert(0);
  return NULL;
}
#if CONFIG_HIGHBITDEPTH
// TODO(sarahparker) I am adding a quick workaround for these functions
// to remove the old hbd transforms. This will be cleaned up in a followup.
// High-bitdepth DPCM inverse transform + add for a 4-point 1-D line.
// `dir` selects the 1-D transform configuration: 0 = horizontal/row,
// 1 = vertical/column (see the table comment below). Output pixels are
// clipped to the range implied by bit depth `bd`.
void av1_hbd_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir) {
assert(tx_type < TX_TYPES_1D);
static const TxfmFunc IHT[] = { av1_idct4_new, av1_iadst4_new, av1_iadst4_new,
av1_iidentity4_c };
// In order { horizontal, vertical }
static const TXFM_1D_CFG *inv_txfm_cfg_ls[TX_TYPES_1D][2] = {
{ &inv_txfm_1d_row_cfg_dct_4, &inv_txfm_1d_col_cfg_dct_4 },
{ &inv_txfm_1d_row_cfg_adst_4, &inv_txfm_1d_col_cfg_adst_4 },
{ &inv_txfm_1d_row_cfg_adst_4, &inv_txfm_1d_col_cfg_adst_4 },
{ &inv_txfm_1d_cfg_identity_4, &inv_txfm_1d_cfg_identity_4 }
};
const TXFM_1D_CFG *inv_txfm_cfg = inv_txfm_cfg_ls[tx_type][dir];
const TxfmFunc inv_tx = IHT[tx_type];
tran_low_t out[4];
inv_tx(input, out, inv_txfm_cfg->cos_bit, inv_txfm_cfg->stage_range);
for (int i = 0; i < 4; ++i) {
// Extra sqrt(2) scaling mirrors the 4*sqrt(2) pre-scale applied by the
// matching forward transform av1_hbd_dpcm_ft4_c.
out[i] = (tran_low_t)dct_const_round_shift(out[i] * Sqrt2);
dest[i * stride] = highbd_clip_pixel_add(dest[i * stride],
ROUND_POWER_OF_TWO(out[i], 4), bd);
}
}
// High-bitdepth DPCM inverse transform + add for an 8-point 1-D line.
// `dir` selects the 1-D transform configuration: 0 = horizontal/row,
// 1 = vertical/column.
// NOTE(review): the IHT table reuses the 4-point kernels (av1_idct4_new,
// av1_iadst4_new, av1_iidentity4_c) with 8-point configs — this matches the
// "quick workaround" comment above for removing the old hbd transforms, but
// the kernel/config pairing should be verified.
void av1_hbd_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir) {
assert(tx_type < TX_TYPES_1D);
static const TxfmFunc IHT[] = { av1_idct4_new, av1_iadst4_new, av1_iadst4_new,
av1_iidentity4_c };
// In order { horizontal, vertical }
static const TXFM_1D_CFG *inv_txfm_cfg_ls[TX_TYPES_1D][2] = {
{ &inv_txfm_1d_row_cfg_dct_8, &inv_txfm_1d_col_cfg_dct_8 },
{ &inv_txfm_1d_row_cfg_adst_8, &inv_txfm_1d_col_cfg_adst_8 },
{ &inv_txfm_1d_row_cfg_adst_8, &inv_txfm_1d_col_cfg_adst_8 },
{ &inv_txfm_1d_cfg_identity_8, &inv_txfm_1d_cfg_identity_8 }
};
const TXFM_1D_CFG *inv_txfm_cfg = inv_txfm_cfg_ls[tx_type][dir];
const TxfmFunc inv_tx = IHT[tx_type];
tran_low_t out[8];
inv_tx(input, out, inv_txfm_cfg->cos_bit, inv_txfm_cfg->stage_range);
for (int i = 0; i < 8; ++i) {
// No extra sqrt(2) scaling is applied for the 8-point size.
dest[i * stride] = highbd_clip_pixel_add(dest[i * stride],
ROUND_POWER_OF_TWO(out[i], 4), bd);
}
}
// High-bitdepth DPCM inverse transform + add for a 16-point 1-D line.
// `dir` selects the 1-D transform configuration: 0 = horizontal/row,
// 1 = vertical/column.
// NOTE(review): the IHT table reuses the 4-point kernels with 16-point
// configs — per the "quick workaround" comment above; verify the pairing.
void av1_hbd_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir) {
assert(tx_type < TX_TYPES_1D);
static const TxfmFunc IHT[] = { av1_idct4_new, av1_iadst4_new, av1_iadst4_new,
av1_iidentity4_c };
// In order { horizontal, vertical }
static const TXFM_1D_CFG *inv_txfm_cfg_ls[TX_TYPES_1D][2] = {
{ &inv_txfm_1d_row_cfg_dct_16, &inv_txfm_1d_col_cfg_dct_16 },
{ &inv_txfm_1d_row_cfg_adst_16, &inv_txfm_1d_col_cfg_adst_16 },
{ &inv_txfm_1d_row_cfg_adst_16, &inv_txfm_1d_col_cfg_adst_16 },
{ &inv_txfm_1d_cfg_identity_16, &inv_txfm_1d_cfg_identity_16 }
};
const TXFM_1D_CFG *inv_txfm_cfg = inv_txfm_cfg_ls[tx_type][dir];
const TxfmFunc inv_tx = IHT[tx_type];
tran_low_t out[16];
inv_tx(input, out, inv_txfm_cfg->cos_bit, inv_txfm_cfg->stage_range);
for (int i = 0; i < 16; ++i) {
// Extra sqrt(2) scaling mirrors the 2*sqrt(2) pre-scale applied by the
// matching forward transform; rounding shift is 5 for this size.
out[i] = (tran_low_t)dct_const_round_shift(out[i] * Sqrt2);
dest[i * stride] = highbd_clip_pixel_add(dest[i * stride],
ROUND_POWER_OF_TWO(out[i], 5), bd);
}
}
// High-bitdepth DPCM inverse transform + add for a 32-point 1-D line.
// `dir` selects the 1-D transform configuration: 0 = horizontal/row,
// 1 = vertical/column.
// NOTE(review): the IHT table reuses the 4-point kernels with 32-point
// configs — per the "quick workaround" comment above; verify the pairing.
void av1_hbd_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir) {
assert(tx_type < TX_TYPES_1D);
static const TxfmFunc IHT[] = { av1_idct4_new, av1_iadst4_new, av1_iadst4_new,
av1_iidentity4_c };
// In order { horizontal, vertical }
static const TXFM_1D_CFG *inv_txfm_cfg_ls[TX_TYPES_1D][2] = {
{ &inv_txfm_1d_row_cfg_dct_32, &inv_txfm_1d_col_cfg_dct_32 },
{ &inv_txfm_1d_row_cfg_adst_32, &inv_txfm_1d_col_cfg_adst_32 },
{ &inv_txfm_1d_row_cfg_adst_32, &inv_txfm_1d_col_cfg_adst_32 },
{ &inv_txfm_1d_cfg_identity_32, &inv_txfm_1d_cfg_identity_32 }
};
const TXFM_1D_CFG *inv_txfm_cfg = inv_txfm_cfg_ls[tx_type][dir];
const TxfmFunc inv_tx = IHT[tx_type];
tran_low_t out[32];
inv_tx(input, out, inv_txfm_cfg->cos_bit, inv_txfm_cfg->stage_range);
for (int i = 0; i < 32; ++i) {
// No extra sqrt(2) scaling is applied for the 32-point size.
dest[i * stride] = highbd_clip_pixel_add(dest[i * stride],
ROUND_POWER_OF_TWO(out[i], 4), bd);
}
}
// Returns the high-bitdepth DPCM inverse-transform-and-add function for the
// given 1-D transform length. Only lengths 4/8/16/32 are supported; any
// other value asserts and returns NULL.
hbd_dpcm_inv_txfm_add_func av1_get_hbd_dpcm_inv_txfm_add_func(int tx_length) {
  if (tx_length == 4) return av1_hbd_dpcm_inv_txfm_add_4_c;
  if (tx_length == 8) return av1_hbd_dpcm_inv_txfm_add_8_c;
  if (tx_length == 16) return av1_hbd_dpcm_inv_txfm_add_16_c;
  if (tx_length == 32) return av1_hbd_dpcm_inv_txfm_add_32_c;
  // TODO(huisu): add support for TX_64X64.
  assert(0);
  return NULL;
}
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_DPCM_INTRA
......@@ -72,37 +72,6 @@ void av1_highbd_inv_txfm_add_8x4(const tran_low_t *input, uint8_t *dest,
void av1_highbd_inv_txfm_add(const tran_low_t *input, uint8_t *dest, int stride,
TxfmParam *txfm_param);
#if CONFIG_DPCM_INTRA
void av1_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, uint8_t *dest);
void av1_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, uint8_t *dest);
void av1_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, uint8_t *dest);
void av1_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, uint8_t *dest);
typedef void (*dpcm_inv_txfm_add_func)(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, uint8_t *dest);
dpcm_inv_txfm_add_func av1_get_dpcm_inv_txfm_add_func(int tx_length);
#if CONFIG_HIGHBITDEPTH
void av1_hbd_dpcm_inv_txfm_add_4_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir);
void av1_hbd_dpcm_inv_txfm_add_8_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir);
void av1_hbd_dpcm_inv_txfm_add_16_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir);
void av1_hbd_dpcm_inv_txfm_add_32_c(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd, uint16_t *dest,
int dir);
typedef void (*hbd_dpcm_inv_txfm_add_func)(const tran_low_t *input, int stride,
TX_TYPE_1D tx_type, int bd,
uint16_t *dest, int dir);
hbd_dpcm_inv_txfm_add_func av1_get_hbd_dpcm_inv_txfm_add_func(int tx_length);
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_DPCM_INTRA
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -19,20 +19,6 @@
extern "C" {
#endif
#if CONFIG_DPCM_INTRA
// Returns nonzero if DPCM intra coding applies: vertical prediction pairs
// with IDTX or H_DCT, horizontal prediction with IDTX or V_DCT.
static INLINE int av1_use_dpcm_intra(int plane, PREDICTION_MODE mode,
                                     TX_TYPE tx_type,
                                     const MB_MODE_INFO *const mbmi) {
  (void)mbmi;
  (void)plane;
#if CONFIG_EXT_INTRA
  // A non-zero angle delta on blocks >= 8x8 rules out DPCM.
  if (mbmi->sb_type >= BLOCK_8X8 && mbmi->angle_delta[plane != 0]) return 0;
#endif  // CONFIG_EXT_INTRA
  if (mode == V_PRED) return tx_type == IDTX || tx_type == H_DCT;
  if (mode == H_PRED) return tx_type == IDTX || tx_type == V_DCT;
  return 0;
}
#endif // CONFIG_DPCM_INTRA
void av1_init_intra_predictors(void);
void av1_predict_intra_block_facade(MACROBLOCKD *xd, int plane, int block_idx,
int blk_col, int blk_row, TX_SIZE tx_size);
......
......@@ -471,133 +471,6 @@ static int av1_pvq_decode_helper2(AV1_COMMON *cm, MACROBLOCKD *const xd,
}
#endif
#if CONFIG_DPCM_INTRA
// Reconstructs a DPCM vertically-predicted block one row at a time: each row
// (after the first) is predicted by copying the reconstructed row above, then
// the row's residual is inverse transformed and added in place.
static void process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                    const tran_low_t *dqcoeff, uint8_t *dst,
                                    int dst_stride) {
  const int width = tx_size_wide[tx_size];
  const int height = tx_size_high[tx_size];
  const dpcm_inv_txfm_add_func inverse_tx =
      av1_get_dpcm_inv_txfm_add_func(width);
  for (int r = 0; r < height; ++r, dqcoeff += width, dst += dst_stride) {
    if (r > 0) memcpy(dst, dst - dst_stride, width * sizeof(dst[0]));
    // Row coefficients are contiguous, so the transform stride is 1.
    inverse_tx(dqcoeff, 1, tx_type_1d, dst);
  }
}
// Reconstructs a DPCM horizontally-predicted block one column at a time:
// each column (after the first) is predicted from the reconstructed column
// to its left, then the column's residual is inverse transformed and added.
static void process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                    const tran_low_t *dqcoeff, uint8_t *dst,
                                    int dst_stride) {
  const int width = tx_size_wide[tx_size];
  const int height = tx_size_high[tx_size];
  const dpcm_inv_txfm_add_func inverse_tx =
      av1_get_dpcm_inv_txfm_add_func(height);
  tran_low_t col_buf[64];
  for (int c = 0; c < width; ++c) {
    // Propagate the left neighbor as the predictor and gather this column's
    // coefficients (stored row-major) into a contiguous buffer.
    for (int r = 0; r < height; ++r) {
      if (c > 0) dst[r * dst_stride + c] = dst[r * dst_stride + c - 1];
      col_buf[r] = dqcoeff[r * width + c];
    }
    inverse_tx(col_buf, dst_stride, tx_type_1d, dst + c);
  }
}
#if CONFIG_HIGHBITDEPTH
// High-bitdepth counterpart of process_block_dpcm_vert(): reconstructs a
// vertically predicted DPCM block row by row, copying the reconstructed row
// above as the predictor before adding each row's residual.
static void hbd_process_block_dpcm_vert(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                        int bd, const tran_low_t *dqcoeff,
                                        uint8_t *dst8, int dst_stride) {
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  const int width = tx_size_wide[tx_size];
  const int height = tx_size_high[tx_size];
  const hbd_dpcm_inv_txfm_add_func inverse_tx =
      av1_get_hbd_dpcm_inv_txfm_add_func(width);
  for (int r = 0; r < height; ++r, dqcoeff += width, dst += dst_stride) {
    if (r > 0) memcpy(dst, dst - dst_stride, width * sizeof(dst[0]));
    // dir = 1 selects the vertical (column) transform configuration.
    inverse_tx(dqcoeff, 1, tx_type_1d, bd, dst, 1);
  }
}
// High-bitdepth counterpart of process_block_dpcm_horz(): reconstructs a
// horizontally predicted DPCM block column by column, predicting each column
// from the reconstructed column to its left before adding its residual.
static void hbd_process_block_dpcm_horz(TX_SIZE tx_size, TX_TYPE_1D tx_type_1d,
                                        int bd, const tran_low_t *dqcoeff,
                                        uint8_t *dst8, int dst_stride) {
  uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
  const int tx1d_width = tx_size_wide[tx_size];
  const int tx1d_height = tx_size_high[tx_size];
  // av1_get_hbd_dpcm_inv_txfm_add_func() maps 4/8/16/32 to the corresponding
  // av1_hbd_dpcm_inv_txfm_add_*_c function and asserts otherwise. The
  // previous code immediately overwrote this result with a switch statement
  // performing the identical mapping; that redundant switch is removed.
  const hbd_dpcm_inv_txfm_add_func inverse_tx =
      av1_get_hbd_dpcm_inv_txfm_add_func(tx1d_height);
  tran_low_t tx_buff[64];
  for (int c = 0; c < tx1d_width; ++c, ++dqcoeff, ++dst) {
    for (int r = 0; r < tx1d_height; ++r) {
      // Horizontal DPCM: predict each pixel from its left neighbor, and
      // gather this column's coefficients (row-major layout) contiguously.
      if (c > 0) dst[r * dst_stride] = dst[r * dst_stride - 1];
      tx_buff[r] = dqcoeff[r * tx1d_width];
    }
    // dir = 0 selects the horizontal (row) transform configuration.
    inverse_tx(tx_buff, dst_stride, tx_type_1d, bd, dst, 0);
  }
}
#endif // CONFIG_HIGHBITDEPTH
// Reconstructs one DPCM-intra coded transform block: maps the 2-D tx_type to
// its 1-D equivalent, dispatches to the row- or column-wise DPCM
// reconstruction matching the prediction mode (high-bitdepth path when the
// frame buffer is HBD), then clears the consumed dequantized coefficients.
static void inverse_transform_block_dpcm(MACROBLOCKD *xd, int plane,
PREDICTION_MODE mode, TX_SIZE tx_size,
TX_TYPE tx_type, uint8_t *dst,
int dst_stride, int16_t scan_line) {
struct macroblockd_plane *const pd = &xd->plane[plane];
tran_low_t *const dqcoeff = pd->dqcoeff;
TX_TYPE_1D tx_type_1d = DCT_1D;
// Only the combinations permitted by av1_use_dpcm_intra() are legal here:
// IDTX with either mode, V_DCT with H_PRED, H_DCT with V_PRED.
switch (tx_type) {
case IDTX: tx_type_1d = IDTX_1D; break;
case V_DCT:
assert(mode == H_PRED);
tx_type_1d = DCT_1D;
break;
case H_DCT:
assert(mode == V_PRED);
tx_type_1d = DCT_1D;
break;
default: assert(0);
}
// Vertical prediction is processed row by row; horizontal prediction is
// processed column by column.
switch (mode) {
case V_PRED:
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
hbd_process_block_dpcm_vert(tx_size, tx_type_1d, xd->bd, dqcoeff, dst,
dst_stride);
} else {
#endif  // CONFIG_HIGHBITDEPTH
process_block_dpcm_vert(tx_size, tx_type_1d, dqcoeff, dst, dst_stride);
#if CONFIG_HIGHBITDEPTH
}
#endif  // CONFIG_HIGHBITDEPTH
break;
case H_PRED:
#if CONFIG_HIGHBITDEPTH
if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
hbd_process_block_dpcm_horz(tx_size, tx_type_1d, xd->bd, dqcoeff, dst,
dst_stride);
} else {
#endif  // CONFIG_HIGHBITDEPTH
process_block_dpcm_horz(tx_size, tx_type_1d, dqcoeff, dst, dst_stride);
#if CONFIG_HIGHBITDEPTH
}
#endif  // CONFIG_HIGHBITDEPTH
break;
default: assert(0);
}
// Zero the coefficients up to and including the last scanned position so
// the shared dqcoeff buffer is clean for the next block.
memset(dqcoeff, 0, (scan_line + 1) * sizeof(dqcoeff[0]));
}
#endif // CONFIG_DPCM_INTRA
static void predict_and_reconstruct_intra_block(
AV1_COMMON *cm, MACROBLOCKD *const xd, aom_reader *const r,
MB_MODE_INFO *const mbmi, int plane, int row, int col, TX_SIZE tx_size) {
......@@ -631,25 +504,16 @@ static void predict_and_reconstruct_intra_block(
if (eob) {
uint8_t *dst =
&pd->dst.buf[(row * pd->dst.stride + col) << tx_size_wide_log2[0]];
#if CONFIG_DPCM_INTRA || CONFIG_LGT
#if CONFIG_LGT
const PREDICTION_MODE mode =
get_prediction_mode(xd->mi[0], plane, tx_size, block_idx);
#if CONFIG_DPCM_INTRA
if (av1_use_dpcm_intra(plane, mode, tx_type, mbmi)) {
inverse_transform_block_dpcm(xd, plane, mode, tx_size, tx_type, dst,
pd->dst.stride, max_scan_line);
} else {
#endif // CONFIG_DPCM_INTRA
#endif // CONFIG_DPCM_INTRA || CONFIG_LGT
inverse_transform_block(xd, plane,
#endif // CONFIG_LGT
inverse_transform_block(xd, plane,
#if CONFIG_LGT
mode,
mode,
#endif
tx_type, tx_size, dst, pd->dst.stride,
max_scan_line, eob);
#if CONFIG_DPCM_INTRA
}
#endif // CONFIG_DPCM_INTRA
tx_type, tx_size, dst, pd->dst.stride,
max_scan_line, eob);
}
#else // !CONFIG_PVQ
const TX_TYPE tx_type =
......
......@@ -2708,98 +2708,4 @@ void av1_fwd_idtx_c(const int16_t *src_diff, tran_low_t *coeff, int stride,
}
}
#endif // CONFIG_EXT_TX
#if CONFIG_DPCM_INTRA
// Forward 4-point 1-D transform for DPCM intra: reads one line of 4 input
// samples (one every `stride`), pre-scales them by 4*sqrt(2), and writes the
// transformed coefficients to `output`.
void av1_dpcm_ft4_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                    tran_low_t *output) {
  static const transform_1d FHT[] = { fdct4, fadst4, fadst4, fidtx4 };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[4];
  for (int i = 0; i < 4; ++i) {
    buf[i] = (tran_low_t)fdct_round_shift(input[i * stride] * 4 * Sqrt2);
  }
  FHT[tx_type](buf, output);
}
// Forward 8-point 1-D transform for DPCM intra: reads one line of 8 input
// samples (one every `stride`), pre-scales them by 4, and writes the
// transformed coefficients to `output`.
void av1_dpcm_ft8_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                    tran_low_t *output) {
  static const transform_1d FHT[] = { fdct8, fadst8, fadst8, fidtx8 };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[8];
  for (int i = 0; i < 8; ++i) {
    buf[i] = input[i * stride] * 4;
  }
  FHT[tx_type](buf, output);
}
// Forward 16-point 1-D transform for DPCM intra: reads one line of 16 input
// samples (one every `stride`), pre-scales them by 2*sqrt(2), and writes the
// transformed coefficients to `output`.
void av1_dpcm_ft16_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                     tran_low_t *output) {
  static const transform_1d FHT[] = { fdct16, fadst16, fadst16, fidtx16 };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[16];
  for (int i = 0; i < 16; ++i) {
    buf[i] = (tran_low_t)fdct_round_shift(input[i * stride] * 2 * Sqrt2);
  }
  FHT[tx_type](buf, output);
}
// Forward 32-point 1-D transform for DPCM intra: reads one line of 32 input
// samples (one every `stride`) with no pre-scaling and writes the
// transformed coefficients to `output`. The ADST slots use the half-right
// transform, as elsewhere for 32-point.
void av1_dpcm_ft32_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                     tran_low_t *output) {
  static const transform_1d FHT[] = { fdct32, fhalfright32, fhalfright32,
                                      fidtx32 };
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[32];
  for (int i = 0; i < 32; ++i) {
    buf[i] = input[i * stride];
  }
  FHT[tx_type](buf, output);
}
#if CONFIG_HIGHBITDEPTH
// High-bitdepth forward 4-point 1-D DPCM transform. Reuses the same scalar
// kernels as av1_dpcm_ft4_c; the `dir` argument is accepted for interface
// symmetry with the inverse path but is unused.
void av1_hbd_dpcm_ft4_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                        tran_low_t *output, int dir) {
  static const transform_1d FHT[] = { fdct4, fadst4, fadst4, fidtx4 };
  (void)dir;
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[4];
  for (int i = 0; i < 4; ++i) {
    // Pre-scale by 4*sqrt(2) before the transform.
    buf[i] = (tran_low_t)fdct_round_shift(input[i * stride] * 4 * Sqrt2);
  }
  FHT[tx_type](buf, output);
}
// High-bitdepth forward 8-point 1-D DPCM transform. Reuses the same scalar
// kernels as av1_dpcm_ft8_c; `dir` is accepted for interface symmetry with
// the inverse path but is unused.
void av1_hbd_dpcm_ft8_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                        tran_low_t *output, int dir) {
  static const transform_1d FHT[] = { fdct8, fadst8, fadst8, fidtx8 };
  (void)dir;
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[8];
  for (int i = 0; i < 8; ++i) {
    // Pre-scale by 4 before the transform.
    buf[i] = input[i * stride] * 4;
  }
  FHT[tx_type](buf, output);
}
// High-bitdepth forward 16-point 1-D DPCM transform. Reuses the same scalar
// kernels as av1_dpcm_ft16_c; `dir` is accepted for interface symmetry with
// the inverse path but is unused.
void av1_hbd_dpcm_ft16_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                         tran_low_t *output, int dir) {
  static const transform_1d FHT[] = { fdct16, fadst16, fadst16, fidtx16 };
  (void)dir;
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[16];
  for (int i = 0; i < 16; ++i) {
    // Pre-scale by 2*sqrt(2) before the transform.
    buf[i] = (tran_low_t)fdct_round_shift(input[i * stride] * 2 * Sqrt2);
  }
  FHT[tx_type](buf, output);
}
// High-bitdepth forward 32-point 1-D DPCM transform. Reuses the same scalar
// kernels as av1_dpcm_ft32_c (half-right transform in the ADST slots); `dir`
// is accepted for interface symmetry with the inverse path but is unused.
void av1_hbd_dpcm_ft32_c(const int16_t *input, int stride, TX_TYPE_1D tx_type,
                         tran_low_t *output, int dir) {
  static const transform_1d FHT[] = { fdct32, fhalfright32, fhalfright32,
                                      fidtx32 };
  (void)dir;
  assert(tx_type < TX_TYPES_1D);
  tran_low_t buf[32];
  for (int i = 0; i < 32; ++i) {
    buf[i] = input[i * stride];  // No pre-scaling for the 32-point size.
  }
  FHT[tx_type](buf, output);
}
#endif // CONFIG_HIGHBITDEPTH
#endif // CONFIG_DPCM_INTRA
#endif // !AV1_DCT_GTEST
This diff is collapsed.
......@@ -89,15 +89,6 @@ void av1_store_pvq_enc_info(PVQ_INFO *pvq_info, int *qg, int *theta, int *k,
int *size, int skip_rest, int skip_dir, int bs);
#endif
#if CONFIG_DPCM_INTRA
void av1_encode_block_intra_dpcm(const AV1_COMMON *cm, MACROBLOCK *x,
PREDICTION_MODE mode, int plane, int block,
int blk_row, int blk_col,
BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
TX_TYPE tx_type, ENTROPY_CONTEXT *ta,
ENTROPY_CONTEXT *tl, int8_t *skip);
#endif // CONFIG_DPCM_INTRA
#ifdef __cplusplus
} // extern "C"
#endif
......
......@@ -2030,25 +2030,6 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
if (!is_inter_block(mbmi)) {
av1_predict_intra_block_facade(xd, plane, block, blk_col, blk_row, tx_size);
#if CONFIG_DPCM_INTRA
const int block_raster_idx =
av1_block_index_to_raster_order(tx_size, block);
const PREDICTION_MODE mode = (plane == AOM_PLANE_Y)
? get_y_mode(xd->mi[0], block_raster_idx)
: get_uv_mode(mbmi->uv_mode);
TX_TYPE tx_type =
av1_get_tx_type((plane == AOM_PLANE_Y) ? PLANE_TYPE_Y : PLANE_TYPE_UV,
xd, blk_row, blk_col, block, tx_size);
if (av1_use_dpcm_intra(plane, mode, tx_type, mbmi)) {
int8_t skip;
av1_encode_block_intra_dpcm(cm, x, mode, plane, block, blk_row, blk_col,
plane_bsize, tx_size, tx_type, a, l, &skip);
av1_dist_block(args->cpi, x, plane, plane_bsize, block, blk_row, blk_col,
tx_size, &this_rd_stats.dist, &this_rd_stats.sse,
OUTPUT_HAS_DECODED_PIXELS);
goto CALCULATE_RD;
}
#endif // CONFIG_DPCM_INTRA
av1_subtract_txb(x, plane, plane_bsize, blk_col, blk_row, tx_size);
}
......@@ -2120,9 +2101,6 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
cfl_store(xd->cfl, dst, dst_stride, blk_row, blk_col, tx_size, plane_bsize);
}
#endif
#if CONFIG_DPCM_INTRA
CALCULATE_RD : {}
#endif // CONFIG_DPCM_INTRA
rd = RDCOST(x->rdmult, 0, this_rd_stats.dist);
if (args->this_rd + rd > args->best_rd) {
args->exit_early = 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment