Commit 8a6af9f9 authored by Scott LaVarnway's avatar Scott LaVarnway

Improved uv mv calculations in build inter predictor

Changed calculations to use shifts instead of if-then-else.
Eliminates branches.

Change-Id: I11b75e8bb305301ffd9cb577fb7df059a3cf9ea4
parent f357e5e2
......@@ -9,6 +9,7 @@
*/
#include <limits.h>
#include "vpx_config.h"
#include "vpx/vpx_integer.h"
#include "recon.h"
......@@ -242,19 +243,10 @@ void vp8_build_inter16x16_predictors_mbuv(MACROBLOCKD *x)
int pre_stride = x->block[16].pre_stride;
/* calc uv motion vectors */
if (mv_row < 0)
mv_row -= 1;
else
mv_row += 1;
if (mv_col < 0)
mv_col -= 1;
else
mv_col += 1;
mv_row += 1 | (mv_row >> (sizeof(int) * CHAR_BIT - 1));
mv_col += 1 | (mv_col >> (sizeof(int) * CHAR_BIT - 1));
mv_row /= 2;
mv_col /= 2;
mv_row &= x->fullpixel_mask;
mv_col &= x->fullpixel_mask;
......@@ -295,8 +287,7 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
+ x->block[yoffset+4].bmi.mv.as_mv.row
+ x->block[yoffset+5].bmi.mv.as_mv.row;
if (temp < 0) temp -= 4;
else temp += 4;
temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
......@@ -305,15 +296,11 @@ void vp8_build_inter4x4_predictors_mbuv(MACROBLOCKD *x)
+ x->block[yoffset+4].bmi.mv.as_mv.col
+ x->block[yoffset+5].bmi.mv.as_mv.col;
if (temp < 0) temp -= 4;
else temp += 4;
temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
x->block[voffset].bmi.mv.as_mv.row =
x->block[uoffset].bmi.mv.as_mv.row ;
x->block[voffset].bmi.mv.as_mv.col =
x->block[uoffset].bmi.mv.as_mv.col ;
x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
}
}
......@@ -430,19 +417,10 @@ void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
}
/* calc uv motion vectors */
if ( _16x16mv.as_mv.row < 0)
_16x16mv.as_mv.row -= 1;
else
_16x16mv.as_mv.row += 1;
if (_16x16mv.as_mv.col < 0)
_16x16mv.as_mv.col -= 1;
else
_16x16mv.as_mv.col += 1;
_16x16mv.as_mv.row += 1 | (_16x16mv.as_mv.row >> (sizeof(int) * CHAR_BIT - 1));
_16x16mv.as_mv.col += 1 | (_16x16mv.as_mv.col >> (sizeof(int) * CHAR_BIT - 1));
_16x16mv.as_mv.row /= 2;
_16x16mv.as_mv.col /= 2;
_16x16mv.as_mv.row &= x->fullpixel_mask;
_16x16mv.as_mv.col &= x->fullpixel_mask;
......@@ -559,8 +537,7 @@ void build_4x4uvmvs(MACROBLOCKD *x)
+ x->mode_info_context->bmi[yoffset + 4].mv.as_mv.row
+ x->mode_info_context->bmi[yoffset + 5].mv.as_mv.row;
if (temp < 0) temp -= 4;
else temp += 4;
temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
x->block[uoffset].bmi.mv.as_mv.row = (temp / 8) & x->fullpixel_mask;
......@@ -569,18 +546,14 @@ void build_4x4uvmvs(MACROBLOCKD *x)
+ x->mode_info_context->bmi[yoffset + 4].mv.as_mv.col
+ x->mode_info_context->bmi[yoffset + 5].mv.as_mv.col;
if (temp < 0) temp -= 4;
else temp += 4;
temp += 4 + ((temp >> (sizeof(int) * CHAR_BIT - 1)) << 3);
x->block[uoffset].bmi.mv.as_mv.col = (temp / 8) & x->fullpixel_mask;
if (x->mode_info_context->mbmi.need_to_clamp_mvs)
clamp_uvmv_to_umv_border(&x->block[uoffset].bmi.mv.as_mv, x);
x->block[voffset].bmi.mv.as_mv.row =
x->block[uoffset].bmi.mv.as_mv.row ;
x->block[voffset].bmi.mv.as_mv.col =
x->block[uoffset].bmi.mv.as_mv.col ;
x->block[voffset].bmi.mv.as_int = x->block[uoffset].bmi.mv.as_int;
}
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment