Commit 7dec6c4c authored by Angie Chiang

Move eob/lossless check into av1_optimize_b

This guarantees that av1_optimize_b is turned off when
lossless mode is on

Remove heuristic lossless check in optimize_b_greedy

Change-Id: I636c776f3f6b632eb03bc57a470ea43aae4fe0f6
parent 10e1da9a
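
The diff below is the authoritative change. As a quick orientation, the following is a minimal, self-contained sketch of the pattern the commit introduces: the eob == 0 and lossless early returns move into a single entry point, so call sites no longer repeat the guard. The types and names in the sketch (block_plane, optimize_b_impl, optimize_b) are invented for illustration and are not the AV1 API; the real signatures appear in the hunks that follow.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the encoder state the real code touches. */
typedef struct {
  int eob;      /* end-of-block position after quantization */
  int lossless; /* nonzero if the segment is coded losslessly */
  int qindex;   /* quantizer index; 0 implies lossless */
} block_plane;

/* Placeholder for the expensive coefficient optimization (greedy or trellis).
 * Only reached for lossy blocks that actually have coefficients. */
static int optimize_b_impl(block_plane *p) { return p->eob; }

/* Wrapper mirroring the role of the new av1_optimize_b: the eob/lossless
 * checks live here, so callers can drop their own guards. */
static int optimize_b(block_plane *p) {
  /* qindex == 0 and lossless must agree, as the moved assert enforces. */
  assert((p->qindex == 0) == (p->lossless != 0));
  if (p->eob == 0) return p->eob;  /* nothing to optimize */
  if (p->lossless) return p->eob;  /* optimization stays off in lossless mode */
  return optimize_b_impl(p);
}

int main(void) {
  block_plane lossy = { 5, 0, 32 };   /* runs the optimizer */
  block_plane empty = { 0, 0, 32 };   /* early return: no coefficients */
  block_plane losslss = { 7, 1, 0 };  /* early return: lossless segment */
  printf("%d %d %d\n", optimize_b(&lossy), optimize_b(&empty),
         optimize_b(&losslss));
  return 0;
}

Call sites then reduce to a single unconditional call, which is what the later hunks do for av1_optimize_b.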
@@ -140,20 +140,19 @@ get_token_bit_costs(unsigned int token_costs[2][COEFF_CONTEXTS][ENTROPY_TOKENS],
 #if USE_GREEDY_OPTIMIZE_B
-typedef struct av1_token_state {
+typedef struct av1_token_state_greedy {
   int16_t token;
   tran_low_t qc;
   tran_low_t dqc;
-} av1_token_state;
+} av1_token_state_greedy;
-int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
-                   TX_SIZE tx_size, int ctx) {
-#if !CONFIG_PVQ
+static int optimize_b_greedy(const AV1_COMMON *cm, MACROBLOCK *mb, int plane,
+                             int block, TX_SIZE tx_size, int ctx) {
   MACROBLOCKD *const xd = &mb->e_mbd;
   struct macroblock_plane *const p = &mb->plane[plane];
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int ref = is_inter_block(&xd->mi[0]->mbmi);
-  av1_token_state tokens[MAX_TX_SQUARE + 1][2];
+  av1_token_state_greedy tokens[MAX_TX_SQUARE + 1][2];
   uint8_t token_cache[MAX_TX_SQUARE];
   const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
   tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
@@ -189,21 +188,12 @@ int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
       mb->token_costs[txsize_sqr_map[tx_size]][plane_type][ref];
   const int default_eob = tx_size_2d[tx_size];
-  assert((mb->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));
+  assert(mb->qindex > 0);
   assert((!plane_type && !plane) || (plane_type && plane));
   assert(eob <= default_eob);
   int64_t rdmult = (mb->rdmult * plane_rd_mult[ref][plane_type]) >> 1;
-/* CpuSpeedTest uses "--min-q=0 --max-q=0" and expects 100dB psnr
-* This creates conflict with search for a better EOB position
-* The line below is to make sure EOB search is disabled at this corner case.
-*/
-#if !CONFIG_NEW_QUANT && !CONFIG_AOM_QM
-  if (dq_step[1] <= 4) {
-    rdmult = 1;
-  }
-#endif
   int64_t rate0, rate1;
   for (i = 0; i < eob; i++) {
@@ -479,19 +469,11 @@ int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
   mb->plane[plane].eobs[block] = final_eob;
   return final_eob;
-#else  // !CONFIG_PVQ
-  (void)cm;
-  (void)tx_size;
-  (void)ctx;
-  struct macroblock_plane *const p = &mb->plane[plane];
-  return p->eobs[block];
-#endif  // !CONFIG_PVQ
 }
 #else  // USE_GREEDY_OPTIMIZE_B
-typedef struct av1_token_state {
+typedef struct av1_token_state_org {
   int64_t error;
   int rate;
   int16_t next;
@@ -499,16 +481,15 @@ typedef struct av1_token_state {
   tran_low_t qc;
   tran_low_t dqc;
   uint8_t best_index;
-} av1_token_state;
+} av1_token_state_org;
-int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
-                   TX_SIZE tx_size, int ctx) {
-#if !CONFIG_PVQ
+static int optimize_b_org(const AV1_COMMON *cm, MACROBLOCK *mb, int plane,
+                          int block, TX_SIZE tx_size, int ctx) {
   MACROBLOCKD *const xd = &mb->e_mbd;
   struct macroblock_plane *const p = &mb->plane[plane];
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int ref = is_inter_block(&xd->mi[0]->mbmi);
-  av1_token_state tokens[MAX_TX_SQUARE + 1][2];
+  av1_token_state_org tokens[MAX_TX_SQUARE + 1][2];
   uint8_t token_cache[MAX_TX_SQUARE];
   const tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
   tran_low_t *const qcoeff = BLOCK_OFFSET(p->qcoeff, block);
@@ -558,11 +539,10 @@ int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
              ? av1_get_qindex(&cm->seg, xd->mi[0]->mbmi.segment_id,
                               cm->base_qindex)
              : cm->base_qindex;
-  if (qindex == 0) {
-    assert((qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));
-  }
+  assert(qindex > 0);
+  (void)qindex;
 #else
-  assert((mb->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));
+  assert(mb->qindex > 0);
 #endif
   token_costs += band;
@@ -850,16 +830,31 @@ int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
   mb->plane[plane].eobs[block] = final_eob;
   assert(final_eob <= default_eob);
   return final_eob;
-#else  // !CONFIG_PVQ
+}
+#endif  // USE_GREEDY_OPTIMIZE_B
+int av1_optimize_b(const AV1_COMMON *cm, MACROBLOCK *mb, int plane, int block,
+                   TX_SIZE tx_size, int ctx) {
+  MACROBLOCKD *const xd = &mb->e_mbd;
+  struct macroblock_plane *const p = &mb->plane[plane];
+  const int eob = p->eobs[block];
+  assert((mb->qindex == 0) ^ (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0));
+  if (eob == 0) return eob;
+  if (xd->lossless[xd->mi[0]->mbmi.segment_id]) return eob;
+#if CONFIG_PVQ
   (void)cm;
   (void)tx_size;
   (void)ctx;
-  struct macroblock_plane *const p = &mb->plane[plane];
-  return p->eobs[block];
-#endif  // !CONFIG_PVQ
-}
+  return eob;
+#endif
+#if USE_GREEDY_OPTIMIZE_B
+  return optimize_b_greedy(cm, mb, plane, block, tx_size, ctx);
+#else  // USE_GREEDY_OPTIMIZE_B
+  return optimize_b_org(cm, mb, plane, block, tx_size, ctx);
 #endif  // USE_GREEDY_OPTIMIZE_B
+}
 #if !CONFIG_PVQ
 #if CONFIG_HIGHBITDEPTH
@@ -1150,8 +1145,7 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
 #endif
 #if !CONFIG_PVQ
-  if (p->eobs[block] && !xd->lossless[xd->mi[0]->mbmi.segment_id])
-    av1_optimize_b(cm, x, plane, block, tx_size, ctx);
+  av1_optimize_b(cm, x, plane, block, tx_size, ctx);
   av1_set_txb_context(x, plane, block, tx_size, a, l);
@@ -1450,9 +1444,7 @@ void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
   if (args->enable_optimize_b) {
     av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                     ctx, AV1_XFORM_QUANT_FP);
-    if (p->eobs[block]) {
-      av1_optimize_b(cm, x, plane, block, tx_size, ctx);
-    }
+    av1_optimize_b(cm, x, plane, block, tx_size, ctx);
   } else {
     av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                     ctx, AV1_XFORM_QUANT_B);
@@ -749,8 +749,7 @@ int64_t av1_search_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
   av1_invalid_rd_stats(&this_rd_stats);
   av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                   coeff_ctx, AV1_XFORM_QUANT_FP);
-  if (x->plane[plane].eobs[block] && !xd->lossless[mbmi->segment_id])
-    av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
+  av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
   av1_dist_block(cpi, x, plane, plane_bsize, block, blk_row, blk_col, tx_size,
                  &this_rd_stats.dist, &this_rd_stats.sse,
                  OUTPUT_HAS_PREDICTED_PIXELS);
@@ -771,8 +770,7 @@ int64_t av1_search_txk_type(const AV1_COMP *cpi, MACROBLOCK *x, int plane,
   // copy the best result in the above tx_type search for loop
   av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                   coeff_ctx, AV1_XFORM_QUANT_FP);
-  if (x->plane[plane].eobs[block] && !xd->lossless[mbmi->segment_id])
-    av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
+  av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
   if (!is_inter_block(mbmi)) {
     // intra mode needs decoded result such that the next transform block
     // can use it for prediction.
@@ -1542,8 +1542,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
   const int coeff_ctx = combine_entropy_contexts(*a, *l);
   av1_xform_quant(cm, x, plane, block, blk_row, blk_col, plane_bsize, tx_size,
                   coeff_ctx, AV1_XFORM_QUANT_FP);
-  if (x->plane[plane].eobs[block] && !xd->lossless[mbmi->segment_id])
-    av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
+  av1_optimize_b(cm, x, plane, block, tx_size, coeff_ctx);
   if (!is_inter_block(mbmi)) {
     struct macroblock_plane *const p = &x->plane[plane];
@@ -2897,9 +2896,7 @@ static int64_t rd_pick_intra_sub_8x8_y_subblock_mode(
 #endif  // CONFIG_CB4X4
                         BLOCK_8X8, tx_size, coeff_ctx, xform_quant);
-        if (!is_lossless) {
-          av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
-        }
+        av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
         ratey +=
             av1_cost_coeffs(cpi, x, 0, block, tx_size, scan_order, tempa + idx,
@@ -5206,8 +5203,7 @@ static int64_t encode_inter_mb_segment_sub8x8(
       coeff_ctx = combine_entropy_contexts(*(ta + (k & 1)), *(tl + (k >> 1)));
       av1_xform_quant(cm, x, 0, block, idy + (i >> 1), idx + (i & 0x01),
                       BLOCK_8X8, tx_size, coeff_ctx, AV1_XFORM_QUANT_FP);
-      if (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0)
-        av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
+      av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
       av1_dist_block(cpi, x, 0, BLOCK_8X8, block, idy + (i >> 1),
                      idx + (i & 0x1), tx_size, &dist, &ssz,
                      OUTPUT_HAS_PREDICTED_PIXELS);
@@ -6569,8 +6565,7 @@ static int64_t rd_pick_inter_best_sub8x8_mode(
           dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
           av1_xform_quant(cm, x, 0, block, idy + idy_, idx + idx_, BLOCK_8X8,
                           tx_size, coeff_ctx, AV1_XFORM_QUANT_FP);
-          if (xd->lossless[xd->mi[0]->mbmi.segment_id] == 0)
-            av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
+          av1_optimize_b(cm, x, 0, block, tx_size, coeff_ctx);
           eob = p->eobs[block];
           av1_inverse_transform_block(xd, dqcoeff, tx_type, tx_size,