Xiph.Org / aom-rav1e

Commit d283d9bb
Authored Dec 06, 2010 by Scott LaVarnway; committed by Code Review, Dec 06, 2010
Merge "Improve MV prediction accuracy to achieve performance gain"
parents
8534071d
c3bbb291
Showing 8 changed files with 487 additions and 42 deletions (+487, -42)
vp8/encoder/firstpass.c          +2   -2
vp8/encoder/mcomp.c             +26  -24
vp8/encoder/mcomp.h              +4   -2
vp8/encoder/onyx_if.c           +73   -1
vp8/encoder/onyx_int.h          +10   -0
vp8/encoder/pickinter.c          +4   -4
vp8/encoder/rdopt.c            +366   -7
vp8/encoder/temporal_filter.c    +2   -2
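In outline: the SAD-based diamond and full searches in mcomp.c take a new trailing MV *center_mv argument, and the motion-vector rate term inside the search is now charged against that vector instead of against ref_mv, the point the search is seeded from; mcomp.h widens the prototype macros so the function-pointer hooks keep a matching signature; onyx_if.c and onyx_int.h add per-macroblock storage of the previous frame's MVs, reference frames and sign biases (lfmv, lf_ref_frame, lf_ref_frame_sign_bias); and rdopt.c adds helpers (mv_bias, lf_mv_bias, vp8_clamp_mv, the quicksort routines and vp8_mv_pred) that build an improved MV predictor from neighbouring blocks. The callers updated in this commit still pass their existing reference MV as center_mv, so the new argument is the plumbing the predictor plugs into. The toy program below is a minimal sketch of the cost idea only, not libvpx code; mv_t, component_bits and mv_err_cost are made-up stand-ins.

/* Sketch: score a candidate MV with a rate term measured against a predicted
 * "center" MV, so candidates near the prediction stay cheap even when the
 * search was seeded elsewhere.  Illustrative only, not libvpx code. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int row, col; } mv_t;

/* crude stand-in for the encoder's per-component bit-cost tables */
static int component_bits(int d)
{
    int bits = 1;
    for (d = abs(d); d; d >>= 1)
        bits++;
    return bits;
}

/* rate cost of coding mv as a difference from center, scaled by error_per_bit */
static int mv_err_cost(const mv_t *mv, const mv_t *center, int error_per_bit)
{
    return (component_bits(mv->row - center->row) +
            component_bits(mv->col - center->col)) * error_per_bit;
}

int main(void)
{
    mv_t cand   = { 12, -8 };   /* candidate produced by the search */
    mv_t start  = {  0,  0 };   /* old behaviour: cost measured from the search start */
    mv_t center = { 12, -8 };   /* new behaviour: cost measured from the predicted MV */

    printf("cost charged against search start: %d\n", mv_err_cost(&cand, &start, 10));
    printf("cost charged against predicted MV: %d\n", mv_err_cost(&cand, &center, 10));
    return 0;
}

Separating "where the search starts" from "which vector the rate is measured against" is the hook the improved predictor uses; in this commit the two are still the same vector at every call site.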
vp8/encoder/firstpass.c
...
...
@@ -472,7 +472,7 @@ void vp8_first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *
    xd->pre.y_buffer = recon_buffer->y_buffer + recon_yoffset;

    // Initial step/diamond search centred on best mv
-   tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost);
+   tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost, ref_mv);

    if (tmp_err < INT_MAX - new_mv_mode_penalty)
        tmp_err += new_mv_mode_penalty;
...
...
@@ -495,7 +495,7 @@ void vp8_first_pass_motion_search(VP8_COMP *cpi, MACROBLOCK *x, MV *ref_mv, MV *
            num00--;
        else
        {
-           tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param + n, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost);
+           tmp_err = cpi->diamond_search_sad(x, b, d, ref_mv, &tmp_mv, step_param + n, x->errorperbit, &num00, &v_fn_ptr, x->mvsadcost, x->mvcost, ref_mv);

            if (tmp_err < INT_MAX - new_mv_mode_penalty)
                tmp_err += new_mv_mode_penalty;
...
...
vp8/encoder/mcomp.c
...
...
@@ -913,7 +913,8 @@ int vp8_diamond_search_sad
    int *num00,
    vp8_variance_fn_ptr_t *fn_ptr,
    int *mvsadcost[2],
-   int *mvcost[2]
+   int *mvcost[2],
+   MV *center_mv
)
{
    int i, j, step;
...
...
@@ -949,7 +950,7 @@ int vp8_diamond_search_sad
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
-       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // search_param determines the length of the initial step and hence the number of iterations
...
...
@@ -982,7 +983,7 @@ int vp8_diamond_search_sad
            {
                this_mv.row = this_row_offset << 3;
                this_mv.col = this_col_offset << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                if (thissad < bestsad)
                {
...
...
@@ -1013,7 +1014,7 @@ int vp8_diamond_search_sad
        return INT_MAX;

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-   + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+   + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}

int vp8_diamond_search_sadx4
...
...
@@ -1028,7 +1029,8 @@ int vp8_diamond_search_sadx4
    int *num00,
    vp8_variance_fn_ptr_t *fn_ptr,
    int *mvsadcost[2],
-   int *mvcost[2]
+   int *mvcost[2],
+   MV *center_mv
)
{
    int i, j, step;
...
...
@@ -1064,7 +1066,7 @@ int vp8_diamond_search_sadx4
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
-       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // search_param determines the length of the initial step and hence the number of iterations
...
...
@@ -1108,7 +1110,7 @@ int vp8_diamond_search_sadx4
                {
                    this_mv.row = (best_mv->row + ss[i].mv.row) << 3;
                    this_mv.col = (best_mv->col + ss[i].mv.col) << 3;
-                   sad_array[t] += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   sad_array[t] += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (sad_array[t] < bestsad)
                    {
...
...
@@ -1137,7 +1139,7 @@ int vp8_diamond_search_sadx4
            {
                this_mv.row = this_row_offset << 3;
                this_mv.col = this_col_offset << 3;
-               thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+               thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                if (thissad < bestsad)
                {
...
...
@@ -1168,12 +1170,12 @@ int vp8_diamond_search_sadx4
        return INT_MAX;

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
-   + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+   + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}

#if !(CONFIG_REALTIME_ONLY)
-int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
+int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
...
...
@@ -1211,7 +1213,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
        // Baseline value at the centre
        //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(vp8_mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
-       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
// Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
...
...
@@ -1239,7 +1241,7 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
                    this_mv.col = c << 3;
                    //thissad += (int)sqrt(vp8_mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
                    //thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
-                   thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);
+                   thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);

                    if (thissad < bestsad)
                    {
...
...
@@ -1258,12 +1260,12 @@ int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int erro
    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-       + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+       + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}

-int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
+int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
...
...
@@ -1301,7 +1303,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
-       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
// Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
...
...
@@ -1336,7 +1338,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
-                   thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
...
...
@@ -1359,7 +1361,7 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
-                   thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
...
...
@@ -1381,14 +1383,14 @@ int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-       + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+       + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
#endif

-int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2])
+int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
...
...
@@ -1427,7 +1429,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
-       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, ref_mv, mvsadcost, error_per_bit);
+       bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }
// Apply further limits to prevent us looking using vectors that stretch beyiond the UMV border
...
...
@@ -1462,7 +1464,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
-                   thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
...
...
@@ -1491,7 +1493,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
-                   thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
...
...
@@ -1514,7 +1516,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
-                   thissad += vp8_mv_err_cost(&this_mv, ref_mv, mvsadcost, error_per_bit);
+                   thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
...
...
@@ -1535,7 +1537,7 @@ int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int er
    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
-       + vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);
+       + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
...
...
vp8/encoder/mcomp.h
...
...
@@ -67,7 +67,8 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
    int distance, \
    vp8_variance_fn_ptr_t *fn_ptr, \
    int *mvcost[2], \
-   int *mvsadcost[2] \
+   int *mvsadcost[2], \
+   MV *center_mv \
    )

#define prototype_diamond_search_sad(sym)\
...
...
@@ -83,7 +84,8 @@ extern fractional_mv_step_fp vp8_skip_fractional_mv_step;
    int *num00, \
    vp8_variance_fn_ptr_t *fn_ptr, \
    int *mvsadcost[2], \
-   int *mvcost[2] \
+   int *mvcost[2], \
+   MV *center_mv \
    )
#if ARCH_X86 || ARCH_X86_64
...
...
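These macro edits matter because the encoder reaches the searches through function pointers (the cpi->diamond_search_sad and cpi->full_search_sad calls seen above), and the prototype_* macros stamp out one shared signature for the pointer types and for every implementation, so adding MV *center_mv here is what keeps them all in step. Below is a compressed, self-contained sketch of that macro-plus-function-pointer pattern; the names are illustrative and only the shape is borrowed from mcomp.h.

/* Sketch of the prototype-macro + function-pointer pattern (illustrative). */
#include <stdio.h>

typedef struct { short row, col; } MV;

/* One macro spells out the signature once; the pointer typedef and every
 * implementation reuse it, so adding a parameter is a one-line header edit. */
#define prototype_search(sym) \
    int (sym)(const MV *ref_mv, int error_per_bit, const MV *center_mv)

typedef prototype_search((*search_fn));

static prototype_search(search_c)   /* a plain C implementation of the hook */
{
    (void)error_per_bit;
    return (ref_mv->row - center_mv->row) + (ref_mv->col - center_mv->col);
}

int main(void)
{
    search_fn do_search = search_c;  /* the encoder keeps such pointers in its context */
    MV ref = { 4, 4 }, center = { 1, 1 };

    printf("score: %d\n", do_search(&ref, 10, &center));
    return 0;
}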
vp8/encoder/onyx_if.c
...
...
@@ -261,6 +261,21 @@ static void setup_features(VP8_COMP *cpi)
void vp8_dealloc_compressor_data(VP8_COMP *cpi)
{
+   // Delete last frame MV storage buffers
+   if (cpi->lfmv != 0)
+       vpx_free(cpi->lfmv);
+
+   cpi->lfmv = 0;
+
+   if (cpi->lf_ref_frame_sign_bias != 0)
+       vpx_free(cpi->lf_ref_frame_sign_bias);
+
+   cpi->lf_ref_frame_sign_bias = 0;
+
+   if (cpi->lf_ref_frame != 0)
+       vpx_free(cpi->lf_ref_frame);
+
+   cpi->lf_ref_frame = 0;
+
    // Delete sementation map
    if (cpi->segmentation_map != 0)
...
...
@@ -2127,7 +2142,10 @@ VP8_PTR vp8_create_compressor(VP8_CONFIG *oxcf)
    cpi->alt_is_last = 0;
    cpi->gold_is_alt = 0;

+   // allocate memory for storing last frame's MVs for MV prediction.
+   CHECK_MEM_ERROR(cpi->lfmv, vpx_calloc((cpi->common.mb_rows + 1) * (cpi->common.mb_cols + 1), sizeof(int_mv)));
+   CHECK_MEM_ERROR(cpi->lf_ref_frame_sign_bias, vpx_calloc((cpi->common.mb_rows + 1) * (cpi->common.mb_cols + 1), sizeof(int)));
+   CHECK_MEM_ERROR(cpi->lf_ref_frame, vpx_calloc((cpi->common.mb_rows + 1) * (cpi->common.mb_cols + 1), sizeof(int)));

    // Create the encoder segmentation map and set all entries to 0
    CHECK_MEM_ERROR(cpi->segmentation_map, vpx_calloc(cpi->common.mb_rows * cpi->common.mb_cols, 1));
...
...
@@ -4190,6 +4208,60 @@ static void encode_frame_to_data_rate
}
#endif

-   // Update the GF useage maps.
-   // This is done after completing the compression of a frame when all modes etc. are finalized but before loop filter
-   vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
+   ////////////////////////////////
+   ////////////////////////////////
+   // This frame's MVs are saved and will be used in next frame's MV prediction.
+   if (cm->show_frame)   //do not save for altref frame
+   {
+       int mb_row;
+       int mb_col;
+       MODE_INFO *tmp = cm->mip;   //point to beginning of allocated MODE_INFO arrays.
+       //static int last_video_frame = 0;
+
+       /*
+       if (cm->current_video_frame == 0) //first frame: set to 0
+       {
+           for (mb_row = 0; mb_row < cm->mb_rows+1; mb_row ++)
+           {
+               for (mb_col = 0; mb_col < cm->mb_cols+1; mb_col ++)
+               {
+                   cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride)].as_int = 0;
+                   cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride)] = 0;
+                   cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride)] = 0;
+               }
+           }
+       }else
+       */
+
+       if (cm->frame_type != KEY_FRAME)
+       {
+           for (mb_row = 0; mb_row < cm->mb_rows + 1; mb_row ++)
+           {
+               for (mb_col = 0; mb_col < cm->mb_cols + 1; mb_col ++)
+               {
+                   if (tmp->mbmi.ref_frame != INTRA_FRAME)
+                       cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride)].as_int = tmp->mbmi.mv.as_int;
+
+                   cpi->lf_ref_frame_sign_bias[mb_col + mb_row*(cm->mode_info_stride)] = cm->ref_frame_sign_bias[tmp->mbmi.ref_frame];
+                   cpi->lf_ref_frame[mb_col + mb_row*(cm->mode_info_stride)] = tmp->mbmi.ref_frame;
+                   //printf("[%d, %d] ", cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride-1)].as_mv.row, cpi->lfmv[mb_col + mb_row*(cm->mode_info_stride-1)].as_mv.col);
+                   tmp++;
+               }
+           }
+           //last_video_frame = cm->current_video_frame;
+       }
+   }
+   //printf("after: %d %d \n", cm->current_video_frame, cm->show_frame );
+
+   // Update the GF useage maps.
+   // This is done after completing the compression of a frame when all modes etc. are finalized but before loop filter
+   vp8_update_gf_useage_maps(cpi, cm, &cpi->mb);
...
...
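Taken together, the onyx_if.c changes allocate (mb_rows + 1) x (mb_cols + 1) entries per buffer and, after every shown non-key frame, copy each macroblock's MV, reference frame and sign bias into them at index mb_col + mb_row * mode_info_stride, the same bordered layout the MODE_INFO array uses. The fragment below is a stand-alone sketch of that bookkeeping with made-up types; it assumes a stride of mb_cols + 1 (which is what the allocation size implies) and is not the encoder code.

/* Sketch: a bordered (rows+1) x (cols+1) grid of last-frame MVs, written
 * after a frame is encoded and read back for the co-located macroblock of
 * the next frame.  Illustrative types and names only. */
#include <stdio.h>
#include <stdlib.h>

typedef union { unsigned int as_int; struct { short row, col; } as_mv; } int_mv;

typedef struct {
    int stride;          /* mb_cols + 1, matching the bordered allocation */
    int_mv *lfmv;        /* last-frame MV per macroblock */
    int *lf_ref_frame;   /* last-frame reference frame per macroblock */
} last_frame_store;

static int store_alloc(last_frame_store *s, int mb_rows, int mb_cols)
{
    int entries = (mb_rows + 1) * (mb_cols + 1);
    s->stride = mb_cols + 1;
    s->lfmv = calloc(entries, sizeof(int_mv));
    s->lf_ref_frame = calloc(entries, sizeof(int));
    return s->lfmv != NULL && s->lf_ref_frame != NULL;
}

static void store_put(last_frame_store *s, int mb_row, int mb_col, int_mv mv, int ref)
{
    s->lfmv[mb_col + mb_row * s->stride] = mv;
    s->lf_ref_frame[mb_col + mb_row * s->stride] = ref;
}

static int_mv store_get(const last_frame_store *s, int mb_row, int mb_col)
{
    return s->lfmv[mb_col + mb_row * s->stride];
}

int main(void)
{
    last_frame_store s;
    int_mv mv, back;

    if (!store_alloc(&s, 45, 80))    /* e.g. 720p in 16x16 macroblocks */
        return 1;

    mv.as_mv.row = 4;
    mv.as_mv.col = -12;
    store_put(&s, 10, 20, mv, 1);

    back = store_get(&s, 10, 20);
    printf("saved mv for MB (10, 20): (%d, %d)\n", back.as_mv.row, back.as_mv.col);

    free(s.lfmv);
    free(s.lf_ref_frame);
    return 0;
}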
vp8/encoder/onyx_int.h
...
...
@@ -241,6 +241,12 @@ enum
    BLOCK_MAX_SEGMENTS
};

+typedef union
+{
+    unsigned int as_int;
+    MV           as_mv;
+} int_mv;        /* facilitates rapid equality tests */
+
typedef struct
{
...
...
@@ -668,6 +674,10 @@ typedef struct
    unsigned char *gf_active_flags;   // Record of which MBs still refer to last golden frame either directly or through 0,0
    int gf_active_count;

+   //Store last frame's MV info for next frame MV prediction
+   int_mv *lfmv;
+   int *lf_ref_frame_sign_bias;
+   int *lf_ref_frame;
+
} VP8_COMP;
...
...
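The int_mv union added above is what both the saving loop in onyx_if.c and the candidate gathering in rdopt.c lean on: a whole motion vector can be copied, zeroed or compared through as_int in a single operation, which is what the "facilitates rapid equality tests" comment is about. A small illustration, assuming the MV type packs two shorts into one 32-bit word as the union implies (illustrative declarations, not the libvpx headers):

/* Illustration of the int_mv idea; assumes MV is two shorts that fit in
 * one unsigned int, which is what the union in onyx_int.h relies on. */
#include <stdio.h>

typedef struct { short row, col; } MV;
typedef union { unsigned int as_int; MV as_mv; } int_mv;

int main(void)
{
    int_mv a, b;

    a.as_mv.row = 3;
    a.as_mv.col = -7;

    b.as_int = a.as_int;          /* whole-vector copy in one move */

    if (a.as_int == b.as_int)     /* one compare instead of two */
        printf("equal: (%d, %d)\n", b.as_mv.row, b.as_mv.col);

    a.as_int = 0;                 /* zero both components at once */
    printf("cleared: (%d, %d)\n", a.as_mv.row, a.as_mv.col);
    return 0;
}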
vp8/encoder/pickinter.c
...
...
@@ -685,7 +685,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
#if 0
    // Initial step Search
-   bestsme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, cpi->mb.mvcost);
+   bestsme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, cpi->mb.mvcost, &best_ref_mv1);

    mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
    mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
...
...
@@ -698,7 +698,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
        num00--;
    else
    {
-       thissme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, x->mvcost);
+       thissme = vp8_diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, x->errorperbit, &num00, &cpi->fn_ptr, cpi->mb.mvsadcost, x->mvcost, &best_ref_mv1);

        if (thissme < bestsme)
        {
...
...
@@ -724,7 +724,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
    }
    else
    {
-       bestsme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost);  //sadpb < 9
+       bestsme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param, sadpb / 2/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv1);  //sadpb < 9
        mode_mv[NEWMV].row = d->bmi.mv.as_mv.row;
        mode_mv[NEWMV].col = d->bmi.mv.as_mv.col;
...
...
@@ -743,7 +743,7 @@ int vp8_pick_inter_mode(VP8_COMP *cpi, MACROBLOCK *x, int recon_yoffset, int rec
            num00--;
        else
        {
-           thissme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost);  //sadpb = 9
+           thissme = cpi->diamond_search_sad(x, b, d, &best_ref_mv1, &d->bmi.mv.as_mv, step_param + n, sadpb / 4/*x->errorperbit*/, &num00, &cpi->fn_ptr[BLOCK_16X16], x->mvsadcost, x->mvcost, &best_ref_mv1);  //sadpb = 9

            if (thissme < bestsme)
            {
...
...
vp8/encoder/rdopt.c
...
...
@@ -1156,7 +1156,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
                bestsme = vp8_hex_search(x, c, e, best_ref_mv, &mode_mv[NEW4X4], step_param, sadpb/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost);
            else
            {
-               bestsme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &mode_mv[NEW4X4], step_param, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost);
+               bestsme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &mode_mv[NEW4X4], step_param, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost, best_ref_mv);

                n = num00;
                num00 = 0;
...
...
@@ -1169,7 +1169,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
                    num00--;
                else
                {
-                   thissme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &temp_mv, step_param + n, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost);
+                   thissme = cpi->diamond_search_sad(x, c, e, best_ref_mv, &temp_mv, step_param + n, sadpb / 2/*x->errorperbit*/, &num00, v_fn_ptr, x->mvsadcost, mvcost, best_ref_mv);

                    if (thissme < bestsme)
                    {
...
...
@@ -1184,7 +1184,7 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
            // Should we do a full search (best quality only)
            if ((compressor_speed == 0) && (bestsme >> sseshift) > 4000)
            {
-               thissme = cpi->full_search_sad(x, c, e, best_ref_mv, sadpb / 4, 16, v_fn_ptr, x->mvcost, x->mvsadcost);
+               thissme = cpi->full_search_sad(x, c, e, best_ref_mv, sadpb / 4, 16, v_fn_ptr, x->mvcost, x->mvsadcost, best_ref_mv);

                if (thissme < bestsme)
                {
...
...
@@ -1305,6 +1305,273 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x, MV *bes
}
/////////////////////////
+static void mv_bias(const MODE_INFO *x, int refframe, int_mv *mvp, const int *ref_frame_sign_bias)
+{
+    MV xmv;
+    xmv = x->mbmi.mv.as_mv;
+
+    if (ref_frame_sign_bias[x->mbmi.ref_frame] != ref_frame_sign_bias[refframe])
+    {
+        xmv.row *= -1;
+        xmv.col *= -1;
+    }
+
+    mvp->as_mv = xmv;
+}
+
+static void lf_mv_bias(const int lf_ref_frame_sign_bias, int refframe, int_mv *mvp, const int *ref_frame_sign_bias)
+{
+    MV xmv;
+    xmv = mvp->as_mv;
+
+    if (lf_ref_frame_sign_bias != ref_frame_sign_bias[refframe])
+    {
+        xmv.row *= -1;
+        xmv.col *= -1;
+    }
+
+    mvp->as_mv = xmv;
+}
+
+static void vp8_clamp_mv(MV *mv, const MACROBLOCKD *xd)
+{
+    if (mv->col < (xd->mb_to_left_edge - LEFT_TOP_MARGIN))
+        mv->col = xd->mb_to_left_edge - LEFT_TOP_MARGIN;
+    else if (mv->col > xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN)
+        mv->col = xd->mb_to_right_edge + RIGHT_BOTTOM_MARGIN;
+
+    if (mv->row < (xd->mb_to_top_edge - LEFT_TOP_MARGIN))
+        mv->row = xd->mb_to_top_edge - LEFT_TOP_MARGIN;
+    else if (mv->row > xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN)
+        mv->row = xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN;
+}
+
+static void swap(int *x, int *y)
+{
+    int tmp;
+
+    tmp = *x;
+    *x = *y;
+    *y = tmp;
+}
+
+static void quicksortmv(int arr[], int left, int right)
+{
+    int lidx, ridx, pivot;
+
+    lidx = left;
+    ridx = right;
+
+    if (left < right)
+    {
+        pivot = (left + right) / 2;
+
+        while (lidx <= pivot && ridx >= pivot)
+        {
+            while (arr[lidx] < arr[pivot] && lidx <= pivot)
+                lidx++;
+
+            while (arr[ridx] > arr[pivot] && ridx >= pivot)
+                ridx--;
+
+            swap(&arr[lidx], &arr[ridx]);
+
+            lidx++;
+            ridx--;
+
+            if (lidx - 1 == pivot)
+            {
+                ridx++;
+                pivot = ridx;
+            }
+            else if (ridx + 1 == pivot)
+            {
+                lidx--;
+                pivot = lidx;
+            }
+        }
+
+        quicksortmv(arr, left, pivot - 1);
+        quicksortmv(arr, pivot + 1, right);
+    }
+}
+
+static void quicksortsad(int arr[], int idx[], int left, int right)
+{
+    int lidx, ridx, pivot;
+
+    lidx = left;
+    ridx = right;
+
+    if (left < right)
+    {
+        pivot = (left + right) / 2;
+
+        while (lidx <= pivot && ridx >= pivot)
+        {
+            while (arr[lidx] < arr[pivot] && lidx <= pivot)
+                lidx++;
+
+            while (arr[ridx] > arr[pivot] && ridx >= pivot)
+                ridx--;
+
+            swap(&arr[lidx], &arr[ridx]);
+            swap(&idx[lidx], &idx[ridx]);
+
+            lidx++;
+            ridx--;
+
+            if (lidx - 1 == pivot)
+            {
+                ridx++;
+                pivot = ridx;
+            }
+            else if (ridx + 1 == pivot)
+            {
+                lidx--;
+                pivot = lidx;
+            }
+        }
+
+        quicksortsad(arr, idx, left, pivot - 1);
+        quicksortsad(arr, idx, pivot + 1, right);
+    }
+}
+
+//The improved MV prediction
+static void vp8_mv_pred(VP8_COMP *cpi, MACROBLOCKD *xd, const MODE_INFO *here, MV *mvp, int refframe, int *ref_frame_sign_bias, int *sr, int near_sadidx[])
+{
+    const MODE_INFO *above = here - xd->mode_info_stride;
+    const MODE_INFO *left = here - 1;
+    const MODE_INFO *aboveleft = above - 1;
+    int_mv near_mvs[7];
+    int near_ref[7];
+    int_mv mv;
+    int vcnt = 0;
+    int find = 0;
+    int mb_offset;
+
+    int mvx[7];
+    int mvy[7];
+    int i;
+
+    mv.as_int = 0;
+
+    if (here->mbmi.ref_frame != INTRA_FRAME)
+    {
+        near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = 0;
+        near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = 0;
+
+        // read in 3 nearby block's MVs from current frame as prediction candidates.
+        if (above->mbmi.ref_frame != INTRA_FRAME)
+        {
+            near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
+            mv_bias(above, refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+            near_ref[vcnt] = above->mbmi.ref_frame;
+        }
+        vcnt++;
+
+        if (left->mbmi.ref_frame != INTRA_FRAME)
+        {
+            near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
+            mv_bias(left, refframe, &near_mvs[vcnt], ref_frame_sign_bias);
+            near_ref[vcnt] = left->mbmi.ref_frame;
+        }
+        vcnt++;
+
+        if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
+        {
+            near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
+            mv_bias(aboveleft, refframe, &near_mvs[vcnt], ref_frame_sign_bias);