
Commit a97fd6c4 authored by Sarah Parker, committed by Gerrit Code Review

Merge "Update VP9_PROB_COST_SHIFT to VP10_PROB_COST_SHIFT" into nextgenv2

parents 4c7e1cd9 66329154
@@ -15,7 +15,7 @@
 #endif // CONFIG_ANS
 #include "vp10/common/entropy.h"
-/* round(-log2(i/256.) * (1 << VP9_PROB_COST_SHIFT))
+/* round(-log2(i/256.) * (1 << VP10_PROB_COST_SHIFT))
    Begins with a bogus entry for simpler addressing. */
 const uint16_t vp10_prob_cost[256] = {
   4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
@@ -41,7 +41,7 @@ const uint16_t vp10_prob_cost[256] = {
 };
 #if CONFIG_ANS
-// round(-log2(i/1024.) * (1 << VP9_PROB_COST_SHIFT))
+// round(-log2(i/1024.) * (1 << VP10_PROB_COST_SHIFT))
 static const uint16_t vp10_prob_cost10[1024] = {
   5120, 5120, 4608, 4308, 4096, 3931, 3796, 3683, 3584, 3497, 3419, 3349, 3284,
   3225, 3171, 3120, 3072, 3027, 2985, 2945, 2907, 2871, 2837, 2804, 2772, 2742,
......
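Reviewer note: the comment above gives the generating formula for these cost tables. As a sanity check, here is a minimal standalone sketch (not part of this change, and assuming VP10_PROB_COST_SHIFT is 9 as defined in the header below) that reproduces the 256-entry table, including the bogus entry 0:

#include <math.h>
#include <stdio.h>

#define VP10_PROB_COST_SHIFT 9

int main(void) {
  for (int i = 0; i < 256; ++i) {
    const int j = i < 1 ? 1 : i;  /* entry 0 duplicates entry 1 for simpler addressing */
    const long cost = lround(-log2(j / 256.0) * (1 << VP10_PROB_COST_SHIFT));
    printf("%ld%s", cost, (i % 13 == 12 || i == 255) ? "\n" : ", ");
  }
  return 0;
}

Compiled with -lm, the first row it prints (4096, 4096, 3584, 3284, 3072, ...) matches the table above.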
@@ -24,7 +24,7 @@ extern "C" {
 extern const uint16_t vp10_prob_cost[256];
 // The factor to scale from cost in bits to cost in vp10_prob_cost units.
-#define VP9_PROB_COST_SHIFT 9
+#define VP10_PROB_COST_SHIFT 9
 #define vp10_cost_zero(prob) (vp10_prob_cost[prob])
@@ -34,7 +34,7 @@ extern const uint16_t vp10_prob_cost[256];
 // Cost of coding an n bit literal, using 128 (i.e. 50%) probability
 // for each bit.
-#define vp10_cost_literal(n) ((n) * (1 << VP9_PROB_COST_SHIFT))
+#define vp10_cost_literal(n) ((n) * (1 << VP10_PROB_COST_SHIFT))
 static INLINE unsigned int cost_branch256(const unsigned int ct[2],
                                           vpx_prob p) {
......
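A quick note on units for the macros above: with VP10_PROB_COST_SHIFT at 9, one bit of rate corresponds to 1 << 9 = 512 table units, so vp10_cost_literal(n) is simply n * 512, and by the generating formula vp10_prob_cost[128] = round(-log2(128/256.) * 512) = 512, i.e. a 50% probability also costs exactly one bit, as the comment says.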
@@ -84,7 +84,7 @@ static int mv_err_cost(const MV *mv, const MV *ref, const int *mvjcost,
     // accuracy in either bit cost or error cost will cause it to overflow.
     return ROUND_POWER_OF_TWO(
         (unsigned)mv_cost(&diff, mvjcost, mvcost) * error_per_bit,
-        RDDIV_BITS + VP9_PROB_COST_SHIFT - RD_EPB_SHIFT +
+        RDDIV_BITS + VP10_PROB_COST_SHIFT - RD_EPB_SHIFT +
             PIXEL_TRANSFORM_ERROR_SCALE);
   }
   return 0;
@@ -95,7 +95,7 @@ static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
   const MV diff = { (mv->row - ref->row) * 8, (mv->col - ref->col) * 8 };
   return ROUND_POWER_OF_TWO(
       (unsigned)mv_cost(&diff, x->nmvjointsadcost, x->mvsadcost) * sad_per_bit,
-      VP9_PROB_COST_SHIFT);
+      VP10_PROB_COST_SHIFT);
 }
 void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
......
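Both call sites above fold the per-bit scaling back out with ROUND_POWER_OF_TWO. For readers without the tree handy, that helper is libvpx's round-to-nearest right shift; a sketch of the usual definition (the authoritative one lives elsewhere in the tree):

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

So in mvsad_err_cost, the product of the motion-vector cost (in 1/512-of-a-bit units) and sad_per_bit is divided by 512 with rounding, bringing the result back onto the SAD scale.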
@@ -73,8 +73,8 @@ static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
   rsi.restoration_type = RESTORE_NONE;
   err = try_restoration_frame(sd, cpi, &rsi, partial_frame);
   bits = 0;
-  best_cost =
-      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (VP9_PROB_COST_SHIFT - 4)), err);
+  best_cost = RDCOST_DBL(x->rdmult, x->rddiv,
+                         (bits << (VP10_PROB_COST_SHIFT - 4)), err);
   for (i = 0; i < restoration_levels; ++i) {
     rsi.restoration_type = RESTORE_BILATERAL;
     rsi.restoration_level = i;
@@ -83,7 +83,7 @@ static int search_bilateral_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
     // when RDCOST is used. However below we just scale both in the correct
     // ratios appropriately but not exactly by these values.
     bits = restoration_level_bits;
-    cost = RDCOST_DBL(x->rdmult, x->rddiv, (bits << (VP9_PROB_COST_SHIFT - 4)),
+    cost = RDCOST_DBL(x->rdmult, x->rddiv, (bits << (VP10_PROB_COST_SHIFT - 4)),
                       err);
     if (cost < best_cost) {
       restoration_best = i;
@@ -524,8 +524,8 @@ static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, VP10_COMP *cpi,
   rsi.restoration_type = RESTORE_NONE;
   err = try_restoration_frame(src, cpi, &rsi, partial_frame);
   bits = 0;
-  cost_norestore =
-      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (VP9_PROB_COST_SHIFT - 4)), err);
+  cost_norestore = RDCOST_DBL(x->rdmult, x->rddiv,
+                              (bits << (VP10_PROB_COST_SHIFT - 4)), err);
 #if CONFIG_VP9_HIGHBITDEPTH
   if (cm->use_highbitdepth)
@@ -561,8 +561,8 @@ static int search_wiener_filter(const YV12_BUFFER_CONFIG *src, VP10_COMP *cpi,
   memcpy(rsi.hfilter, hfilter, sizeof(rsi.hfilter));
   err = try_restoration_frame(src, cpi, &rsi, partial_frame);
   bits = WIENER_FILT_BITS;
-  cost_wiener =
-      RDCOST_DBL(x->rdmult, x->rddiv, (bits << (VP9_PROB_COST_SHIFT - 4)), err);
+  cost_wiener = RDCOST_DBL(x->rdmult, x->rddiv,
+                           (bits << (VP10_PROB_COST_SHIFT - 4)), err);
   vpx_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
......
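Reviewer note on the bits << (VP10_PROB_COST_SHIFT - 4) scaling used throughout this file: expanding RDCOST_DBL from rd.h, the rate term becomes (bits << 5) * rdmult / (1 << 9) = bits * rdmult / 16, while the distortion term stays err * (1 << rddiv). In other words the bit count is charged at 1/16 of the usual one-bit-equals-512-units scale, which appears to be what the comment above means by scaling the two terms in the right ratio rather than by the exact values.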
@@ -555,7 +555,7 @@ void vp10_model_rd_from_var_lapndz(int64_t var, unsigned int n_log2,
         (((uint64_t)qstep * qstep << (n_log2 + 10)) + (var >> 1)) / var;
     const int xsq_q10 = (int)VPXMIN(xsq_q10_64, MAX_XSQ_Q10);
     model_rd_norm(xsq_q10, &r_q10, &d_q10);
-    *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - VP9_PROB_COST_SHIFT);
+    *rate = ROUND_POWER_OF_TWO(r_q10 << n_log2, 10 - VP10_PROB_COST_SHIFT);
     *dist = (var * (int64_t)d_q10 + 512) >> 10;
   }
 }
......
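Here the shift amount works out to 10 - 9 = 1: model_rd_norm appears to return r_q10 as a rate in Q10 (1/1024-of-a-bit) units, so halving it with rounding converts it to the 1/512-of-a-bit vp10_prob_cost units used by the rest of the rate-distortion code.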
@@ -30,10 +30,10 @@ extern "C" {
 #define RD_EPB_SHIFT 6
 #define RDCOST(RM, DM, R, D) \
-  (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP9_PROB_COST_SHIFT) + (D << DM))
+  (ROUND_POWER_OF_TWO(((int64_t)R) * (RM), VP10_PROB_COST_SHIFT) + (D << DM))
-#define RDCOST_DBL(RM, DM, R, D) \
-  (((((double)(R)) * (RM)) / (double)(1 << VP9_PROB_COST_SHIFT)) + \
+#define RDCOST_DBL(RM, DM, R, D) \
+  (((((double)(R)) * (RM)) / (double)(1 << VP10_PROB_COST_SHIFT)) + \
   ((double)(D) * (1 << (DM))))
 #define QIDX_SKIP_THRESH 115
......
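As a worked example of the integer macro with made-up numbers: RDCOST(rm, dm, R, D) with rm = 100, dm = 3, a rate of one bit (R = 512 in vp10_prob_cost units) and D = 7 gives ROUND_POWER_OF_TWO(512 * 100, 9) + (7 << 3) = 100 + 56 = 156. RDCOST_DBL is the same weighting computed in double precision, as used by the restoration search above.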
@@ -754,7 +754,7 @@ static void model_rd_from_sse(const VP10_COMP *const cpi,
   if (quantizer < 120)
     *rate = (int)((square_error * (280 - quantizer)) >>
-                  (16 - VP9_PROB_COST_SHIFT));
+                  (16 - VP10_PROB_COST_SHIFT));
   else
     *rate = 0;
   *dist = (square_error * quantizer) >> 8;
......
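With the shift now spelled VP10_PROB_COST_SHIFT the arithmetic here is unchanged: 16 - 9 = 7, so the estimated rate is (square_error * (280 - quantizer)) / 128, which keeps the estimate in the same 512-per-bit cost units consumed by RDCOST.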
@@ -78,7 +78,7 @@ static int remap_prob(int v, int m) {
 static int prob_diff_update_cost(vpx_prob newp, vpx_prob oldp) {
   int delp = remap_prob(newp, oldp);
-  return update_bits[delp] << VP9_PROB_COST_SHIFT;
+  return update_bits[delp] << VP10_PROB_COST_SHIFT;
 }
 static void encode_uniform(vp10_writer *w, int v) {
......