Project: aom-rav1e (Guillaume Martres)

Commit 1d60b6bc
Authored by John Koleszar on Feb 12, 2013
Committed by Gerrit Code Review on Feb 12, 2013

Merge "Replace as_mv struct with array" into experimental

Parents: f496f601, 7ca517f7
Changes: 8 files
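The change itself is mechanical: the two named inter predictor motion vectors in b_mode_info become a two-element array, and every call site switches from .as_mv.first / .as_mv.second to as_mv[0] / as_mv[1]. The sketch below is an illustrative reconstruction of that before/after shape, not the full headers: the int_mv layout is a trimmed stand-in consistent with the .as_int, .as_mv.row and .as_mv.col accesses visible in the diffs, and the as_mode member is reduced to a placeholder.

#include <stdint.h>

/* Trimmed stand-in for vp9's int_mv, inferred from the accesses in this diff. */
typedef union int_mv {
  uint32_t as_int;                     /* whole motion vector as one word */
  struct { int16_t row, col; } as_mv;  /* component (row/col) access      */
} int_mv;

/* Before this commit: two named members for the two inter predictors. */
union b_mode_info_before {
  int as_mode;          /* placeholder for the prediction-mode member */
  struct {
    int_mv first;
    int_mv second;
  } as_mv;
};

/* After this commit: an array indexable by reference slot. */
union b_mode_info_after {
  int as_mode;          /* placeholder for the prediction-mode member */
  int_mv as_mv[2];      /* [0] = first, [1] = second inter predictor MV */
};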
vp9/common/vp9_blockd.h
...
...
@@ -218,10 +218,7 @@ union b_mode_info {
     B_PREDICTION_MODE context;
 #endif
   } as_mode;
-  struct {
-    int_mv first;
-    int_mv second;
-  } as_mv;
+  int_mv as_mv[2];  // first, second inter predictor motion vectors
 };

 typedef enum {
...
...
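The payoff shows up in the vp9_findnearmv.h helpers below, where ternaries over second_ref_frame now select an index instead of two differently named member chains. A hypothetical accessor, not part of the commit and reusing the stand-in types from the sketch above, captures the pattern:

/* Hypothetical accessor, not from the commit: with the array form, choosing
 * the first or second predictor is an index rather than a named member. */
static uint32_t block_mv_as_int(const union b_mode_info_after *bmi,
                                int second_ref_frame) {
  int slot = (second_ref_frame > 0) ? 1 : 0;  /* mirrors the diff's ternaries */
  return bmi->as_mv[slot].as_int;
}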
vp9/common/vp9_debugmodes.c
...
...
@@ -129,8 +129,8 @@ void vp9_print_modes_and_motion_vectors(MODE_INFO *mi, int rows, int cols,
       mb_index = (b_row >> 2) * (cols + 1) + (b_col >> 2);
       bindex = (b_row & 3) * 4 + (b_col & 3);
-      fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].as_mv.first.as_mv.row,
-              mi[mb_index].bmi[bindex].as_mv.first.as_mv.col);
+      fprintf(mvs, "%3d:%-3d ", mi[mb_index].bmi[bindex].as_mv[0].as_mv.row,
+              mi[mb_index].bmi[bindex].as_mv[0].as_mv.col);
     }
...
...
vp9/common/vp9_findnearmv.h
...
...
@@ -98,7 +98,7 @@ static int left_block_mv(const MACROBLOCKD *xd,
     b += 4;
   }
-  return (cur_mb->bmi + b - 1)->as_mv.first.as_int;
+  return (cur_mb->bmi + b - 1)->as_mv[0].as_int;
 }

 static int left_block_second_mv(const MACROBLOCKD *xd,
...
...
@@ -117,8 +117,8 @@ static int left_block_second_mv(const MACROBLOCKD *xd,
   }
   return cur_mb->mbmi.second_ref_frame > 0 ?
-         (cur_mb->bmi + b - 1)->as_mv.second.as_int :
-         (cur_mb->bmi + b - 1)->as_mv.first.as_int;
+         (cur_mb->bmi + b - 1)->as_mv[1].as_int :
+         (cur_mb->bmi + b - 1)->as_mv[0].as_int;
 }

 static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
...
...
@@ -131,7 +131,7 @@ static int above_block_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
     b += 16;
   }
-  return (cur_mb->bmi + b - 4)->as_mv.first.as_int;
+  return (cur_mb->bmi + b - 4)->as_mv[0].as_int;
 }

 static int above_block_second_mv(const MODE_INFO *cur_mb, int b, int mi_stride) {
...
...
@@ -146,8 +146,8 @@ static int above_block_second_mv(const MODE_INFO *cur_mb, int b, int mi_stride)
   }
   return cur_mb->mbmi.second_ref_frame > 0 ?
-         (cur_mb->bmi + b - 4)->as_mv.second.as_int :
-         (cur_mb->bmi + b - 4)->as_mv.first.as_int;
+         (cur_mb->bmi + b - 4)->as_mv[1].as_int :
+         (cur_mb->bmi + b - 4)->as_mv[0].as_int;
 }

 static B_PREDICTION_MODE left_block_mode(const MODE_INFO *cur_mb, int b) {
...
...
vp9/common/vp9_reconinter.c
...
...
@@ -154,7 +154,7 @@ void vp9_build_inter_predictors_b(BLOCKD *d, int pitch,
   int_mv mv;

   ptr_base = *(d->base_pre);
-  mv.as_int = d->bmi.as_mv.first.as_int;
+  mv.as_int = d->bmi.as_mv[0].as_int;
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
         (mv.as_mv.col >> 3);
...
...
@@ -179,7 +179,7 @@ void vp9_build_2nd_inter_predictors_b(BLOCKD *d, int pitch,
   int_mv mv;

   ptr_base = *(d->base_second_pre);
-  mv.as_int = d->bmi.as_mv.second.as_int;
+  mv.as_int = d->bmi.as_mv[1].as_int;
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
         (mv.as_mv.col >> 3);
...
...
@@ -197,7 +197,7 @@ void vp9_build_inter_predictors4b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
   int_mv mv;

   ptr_base = *(d->base_pre);
-  mv.as_int = d->bmi.as_mv.first.as_int;
+  mv.as_int = d->bmi.as_mv[0].as_int;
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
         (mv.as_mv.col >> 3);
...
...
@@ -222,7 +222,7 @@ void vp9_build_2nd_inter_predictors4b(MACROBLOCKD *xd,
   int_mv mv;

   ptr_base = *(d->base_second_pre);
-  mv.as_int = d->bmi.as_mv.second.as_int;
+  mv.as_int = d->bmi.as_mv[1].as_int;
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
         (mv.as_mv.col >> 3);
...
...
@@ -240,7 +240,7 @@ static void build_inter_predictors2b(MACROBLOCKD *xd, BLOCKD *d, int pitch) {
   int_mv mv;

   ptr_base = *(d->base_pre);
-  mv.as_int = d->bmi.as_mv.first.as_int;
+  mv.as_int = d->bmi.as_mv[0].as_int;
   ptr = ptr_base + d->pre + (mv.as_mv.row >> 3) * d->pre_stride +
         (mv.as_mv.col >> 3);
...
...
@@ -264,38 +264,38 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
       int voffset = 20 + i * 2 + j;
       int temp;

-      temp = blockd[yoffset].bmi.as_mv.first.as_mv.row
-             + blockd[yoffset + 1].bmi.as_mv.first.as_mv.row
-             + blockd[yoffset + 4].bmi.as_mv.first.as_mv.row
-             + blockd[yoffset + 5].bmi.as_mv.first.as_mv.row;
+      temp = blockd[yoffset].bmi.as_mv[0].as_mv.row
+             + blockd[yoffset + 1].bmi.as_mv[0].as_mv.row
+             + blockd[yoffset + 4].bmi.as_mv[0].as_mv.row
+             + blockd[yoffset + 5].bmi.as_mv[0].as_mv.row;

       if (temp < 0) temp -= 4;
       else temp += 4;

-      xd->block[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) &
+      xd->block[uoffset].bmi.as_mv[0].as_mv.row = (temp / 8) &
        xd->fullpixel_mask;

-      temp = blockd[yoffset].bmi.as_mv.first.as_mv.col
-             + blockd[yoffset + 1].bmi.as_mv.first.as_mv.col
-             + blockd[yoffset + 4].bmi.as_mv.first.as_mv.col
-             + blockd[yoffset + 5].bmi.as_mv.first.as_mv.col;
+      temp = blockd[yoffset].bmi.as_mv[0].as_mv.col
+             + blockd[yoffset + 1].bmi.as_mv[0].as_mv.col
+             + blockd[yoffset + 4].bmi.as_mv[0].as_mv.col
+             + blockd[yoffset + 5].bmi.as_mv[0].as_mv.col;

       if (temp < 0) temp -= 4;
       else temp += 4;

-      blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) &
+      blockd[uoffset].bmi.as_mv[0].as_mv.col = (temp / 8) &
        xd->fullpixel_mask;

-      blockd[voffset].bmi.as_mv.first.as_mv.row =
-        blockd[uoffset].bmi.as_mv.first.as_mv.row;
-      blockd[voffset].bmi.as_mv.first.as_mv.col =
-        blockd[uoffset].bmi.as_mv.first.as_mv.col;
+      blockd[voffset].bmi.as_mv[0].as_mv.row =
+        blockd[uoffset].bmi.as_mv[0].as_mv.row;
+      blockd[voffset].bmi.as_mv[0].as_mv.col =
+        blockd[uoffset].bmi.as_mv[0].as_mv.col;

       if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-        temp = blockd[yoffset].bmi.as_mv.second.as_mv.row
-               + blockd[yoffset + 1].bmi.as_mv.second.as_mv.row
-               + blockd[yoffset + 4].bmi.as_mv.second.as_mv.row
-               + blockd[yoffset + 5].bmi.as_mv.second.as_mv.row;
+        temp = blockd[yoffset].bmi.as_mv[1].as_mv.row
+               + blockd[yoffset + 1].bmi.as_mv[1].as_mv.row
+               + blockd[yoffset + 4].bmi.as_mv[1].as_mv.row
+               + blockd[yoffset + 5].bmi.as_mv[1].as_mv.row;

         if (temp < 0) {
           temp -= 4;
...
...
@@ -303,13 +303,13 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
           temp += 4;
         }

-        blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) &
+        blockd[uoffset].bmi.as_mv[1].as_mv.row = (temp / 8) &
          xd->fullpixel_mask;

-        temp = blockd[yoffset].bmi.as_mv.second.as_mv.col
-               + blockd[yoffset + 1].bmi.as_mv.second.as_mv.col
-               + blockd[yoffset + 4].bmi.as_mv.second.as_mv.col
-               + blockd[yoffset + 5].bmi.as_mv.second.as_mv.col;
+        temp = blockd[yoffset].bmi.as_mv[1].as_mv.col
+               + blockd[yoffset + 1].bmi.as_mv[1].as_mv.col
+               + blockd[yoffset + 4].bmi.as_mv[1].as_mv.col
+               + blockd[yoffset + 5].bmi.as_mv[1].as_mv.col;

         if (temp < 0) {
           temp -= 4;
...
...
@@ -317,13 +317,13 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
           temp += 4;
         }

-        blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) &
+        blockd[uoffset].bmi.as_mv[1].as_mv.col = (temp / 8) &
          xd->fullpixel_mask;

-        blockd[voffset].bmi.as_mv.second.as_mv.row =
-          blockd[uoffset].bmi.as_mv.second.as_mv.row;
-        blockd[voffset].bmi.as_mv.second.as_mv.col =
-          blockd[uoffset].bmi.as_mv.second.as_mv.col;
+        blockd[voffset].bmi.as_mv[1].as_mv.row =
+          blockd[uoffset].bmi.as_mv[1].as_mv.row;
+        blockd[voffset].bmi.as_mv[1].as_mv.col =
+          blockd[uoffset].bmi.as_mv[1].as_mv.col;
       }
     }
   }
...
...
@@ -332,7 +332,7 @@ void vp9_build_inter4x4_predictors_mbuv(MACROBLOCKD *xd) {
     BLOCKD *d0 = &blockd[i];
     BLOCKD *d1 = &blockd[i + 1];

-    if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
+    if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
       build_inter_predictors2b(xd, d0, 8);
     else {
       vp9_build_inter_predictors_b(d0, 8, &xd->subpix);
...
...
@@ -717,15 +717,15 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
     blockd[10].bmi = xd->mode_info_context->bmi[10];

     if (mbmi->need_to_clamp_mvs) {
-      clamp_mv_to_umv_border(&blockd[0].bmi.as_mv.first.as_mv, xd);
-      clamp_mv_to_umv_border(&blockd[2].bmi.as_mv.first.as_mv, xd);
-      clamp_mv_to_umv_border(&blockd[8].bmi.as_mv.first.as_mv, xd);
-      clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.first.as_mv, xd);
+      clamp_mv_to_umv_border(&blockd[0].bmi.as_mv[0].as_mv, xd);
+      clamp_mv_to_umv_border(&blockd[2].bmi.as_mv[0].as_mv, xd);
+      clamp_mv_to_umv_border(&blockd[8].bmi.as_mv[0].as_mv, xd);
+      clamp_mv_to_umv_border(&blockd[10].bmi.as_mv[0].as_mv, xd);
       if (mbmi->second_ref_frame > 0) {
-        clamp_mv_to_umv_border(&blockd[0].bmi.as_mv.second.as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[2].bmi.as_mv.second.as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[8].bmi.as_mv.second.as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[10].bmi.as_mv.second.as_mv, xd);
+        clamp_mv_to_umv_border(&blockd[0].bmi.as_mv[1].as_mv, xd);
+        clamp_mv_to_umv_border(&blockd[2].bmi.as_mv[1].as_mv, xd);
+        clamp_mv_to_umv_border(&blockd[8].bmi.as_mv[1].as_mv, xd);
+        clamp_mv_to_umv_border(&blockd[10].bmi.as_mv[1].as_mv, xd);
       }
     }
...
...
@@ -750,15 +750,15 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
       blockd[i + 1].bmi = xd->mode_info_context->bmi[i + 1];

       if (mbmi->need_to_clamp_mvs) {
-        clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.first.as_mv, xd);
-        clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.first.as_mv, xd);
+        clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[0].as_mv, xd);
+        clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv[0].as_mv, xd);
         if (mbmi->second_ref_frame > 0) {
-          clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv.second.as_mv, xd);
-          clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv.second.as_mv, xd);
+          clamp_mv_to_umv_border(&blockd[i + 0].bmi.as_mv[1].as_mv, xd);
+          clamp_mv_to_umv_border(&blockd[i + 1].bmi.as_mv[1].as_mv, xd);
         }
       }

-      if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
+      if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
         build_inter_predictors2b(xd, d0, 16);
       else {
         vp9_build_inter_predictors_b(d0, 16, &xd->subpix);
...
...
@@ -776,7 +776,7 @@ static void build_inter4x4_predictors_mb(MACROBLOCKD *xd) {
     BLOCKD *d0 = &blockd[i];
     BLOCKD *d1 = &blockd[i + 1];

-    if (d0->bmi.as_mv.first.as_int == d1->bmi.as_mv.first.as_int)
+    if (d0->bmi.as_mv[0].as_int == d1->bmi.as_mv[0].as_int)
       build_inter_predictors2b(xd, d0, 8);
     else {
       vp9_build_inter_predictors_b(d0, 8, &xd->subpix);
...
...
@@ -803,44 +803,44 @@ void build_4x4uvmvs(MACROBLOCKD *xd) {
       int temp;

-      temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.row
-             + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.row
-             + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.row
-             + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.row;
+      temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[0].as_mv.row
+             + xd->mode_info_context->bmi[yoffset + 1].as_mv[0].as_mv.row
+             + xd->mode_info_context->bmi[yoffset + 4].as_mv[0].as_mv.row
+             + xd->mode_info_context->bmi[yoffset + 5].as_mv[0].as_mv.row;

       if (temp < 0) temp -= 4;
       else temp += 4;

-      blockd[uoffset].bmi.as_mv.first.as_mv.row = (temp / 8) &
+      blockd[uoffset].bmi.as_mv[0].as_mv.row = (temp / 8) &
        xd->fullpixel_mask;

-      temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.first.as_mv.col
-             + xd->mode_info_context->bmi[yoffset + 1].as_mv.first.as_mv.col
-             + xd->mode_info_context->bmi[yoffset + 4].as_mv.first.as_mv.col
-             + xd->mode_info_context->bmi[yoffset + 5].as_mv.first.as_mv.col;
+      temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[0].as_mv.col
+             + xd->mode_info_context->bmi[yoffset + 1].as_mv[0].as_mv.col
+             + xd->mode_info_context->bmi[yoffset + 4].as_mv[0].as_mv.col
+             + xd->mode_info_context->bmi[yoffset + 5].as_mv[0].as_mv.col;

       if (temp < 0) temp -= 4;
       else temp += 4;

-      blockd[uoffset].bmi.as_mv.first.as_mv.col = (temp / 8) &
+      blockd[uoffset].bmi.as_mv[0].as_mv.col = (temp / 8) &
        xd->fullpixel_mask;

       // if (x->mode_info_context->mbmi.need_to_clamp_mvs)
-      clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd);
+      clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv[0].as_mv, xd);

       // if (x->mode_info_context->mbmi.need_to_clamp_mvs)
-      clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv.first.as_mv, xd);
+      clamp_uvmv_to_umv_border(&blockd[uoffset].bmi.as_mv[0].as_mv, xd);

-      blockd[voffset].bmi.as_mv.first.as_mv.row =
-        blockd[uoffset].bmi.as_mv.first.as_mv.row;
-      blockd[voffset].bmi.as_mv.first.as_mv.col =
-        blockd[uoffset].bmi.as_mv.first.as_mv.col;
+      blockd[voffset].bmi.as_mv[0].as_mv.row =
+        blockd[uoffset].bmi.as_mv[0].as_mv.row;
+      blockd[voffset].bmi.as_mv[0].as_mv.col =
+        blockd[uoffset].bmi.as_mv[0].as_mv.col;

       if (xd->mode_info_context->mbmi.second_ref_frame > 0) {
-        temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.row
-               + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.row
-               + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.row
-               + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.row;
+        temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[1].as_mv.row
+               + xd->mode_info_context->bmi[yoffset + 1].as_mv[1].as_mv.row
+               + xd->mode_info_context->bmi[yoffset + 4].as_mv[1].as_mv.row
+               + xd->mode_info_context->bmi[yoffset + 5].as_mv[1].as_mv.row;

         if (temp < 0) {
           temp -= 4;
...
...
@@ -848,13 +848,13 @@ void build_4x4uvmvs(MACROBLOCKD *xd) {
           temp += 4;
         }

-        blockd[uoffset].bmi.as_mv.second.as_mv.row = (temp / 8) &
+        blockd[uoffset].bmi.as_mv[1].as_mv.row = (temp / 8) &
          xd->fullpixel_mask;

-        temp = xd->mode_info_context->bmi[yoffset + 0].as_mv.second.as_mv.col
-               + xd->mode_info_context->bmi[yoffset + 1].as_mv.second.as_mv.col
-               + xd->mode_info_context->bmi[yoffset + 4].as_mv.second.as_mv.col
-               + xd->mode_info_context->bmi[yoffset + 5].as_mv.second.as_mv.col;
+        temp = xd->mode_info_context->bmi[yoffset + 0].as_mv[1].as_mv.col
+               + xd->mode_info_context->bmi[yoffset + 1].as_mv[1].as_mv.col
+               + xd->mode_info_context->bmi[yoffset + 4].as_mv[1].as_mv.col
+               + xd->mode_info_context->bmi[yoffset + 5].as_mv[1].as_mv.col;

         if (temp < 0) {
           temp -= 4;
...
...
@@ -862,21 +862,21 @@ void build_4x4uvmvs(MACROBLOCKD *xd) {
          temp += 4;
        }

-        blockd[uoffset].bmi.as_mv.second.as_mv.col = (temp / 8) &
+        blockd[uoffset].bmi.as_mv[1].as_mv.col = (temp / 8) &
          xd->fullpixel_mask;

        // if (mbmi->need_to_clamp_mvs)
        clamp_uvmv_to_umv_border(
-          &blockd[uoffset].bmi.as_mv.second.as_mv, xd);
+          &blockd[uoffset].bmi.as_mv[1].as_mv, xd);

        // if (mbmi->need_to_clamp_mvs)
        clamp_uvmv_to_umv_border(
-          &blockd[uoffset].bmi.as_mv.second.as_mv, xd);
+          &blockd[uoffset].bmi.as_mv[1].as_mv, xd);

-        blockd[voffset].bmi.as_mv.second.as_mv.row =
-          blockd[uoffset].bmi.as_mv.second.as_mv.row;
-        blockd[voffset].bmi.as_mv.second.as_mv.col =
-          blockd[uoffset].bmi.as_mv.second.as_mv.col;
+        blockd[voffset].bmi.as_mv[1].as_mv.row =
+          blockd[uoffset].bmi.as_mv[1].as_mv.row;
+        blockd[voffset].bmi.as_mv[1].as_mv.col =
+          blockd[uoffset].bmi.as_mv[1].as_mv.col;
      }
    }
  }
...
...
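A note on the arithmetic the reconinter hunks above touch (only the member access changes in this commit; the math is pre-existing): a chroma block's motion vector component is the sum of four 4x4 luma components, biased by plus or minus 4 so the truncating divide by 8 rounds to nearest, then ANDed with fullpixel_mask, which, judging by the name, clears the sub-pel bits when only full-pel motion is allowed. A standalone sketch with an illustrative function name, not code from the commit:

#include <stdint.h>

/* Illustrative helper: averages four 4x4 luma MV components into one chroma
 * component the way the hunks above do. */
static int16_t chroma_mv_component(int16_t a, int16_t b, int16_t c, int16_t d,
                                   int fullpixel_mask) {
  int sum = a + b + c + d;
  /* Bias toward the nearest multiple of 8 before the truncating division. */
  if (sum < 0)
    sum -= 4;
  else
    sum += 4;
  return (int16_t)((sum / 8) & fullpixel_mask);
}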
vp9/decoder/vp9_decodemv.c
...
...
@@ -1041,9 +1041,9 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
          fill_offset = &mbsplit_fill_offset[s][(unsigned char)j * mbsplit_fill_count[s]];

          do {
-            mi->bmi[*fill_offset].as_mv.first.as_int = blockmv.as_int;
+            mi->bmi[*fill_offset].as_mv[0].as_int = blockmv.as_int;
            if (mbmi->second_ref_frame > 0)
-              mi->bmi[*fill_offset].as_mv.second.as_int = secondmv.as_int;
+              mi->bmi[*fill_offset].as_mv[1].as_int = secondmv.as_int;
            fill_offset++;
          } while (--fill_count);
        }
...
...
@@ -1051,8 +1051,8 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
        } while (++j < num_p);
      }

-      mv->as_int = mi->bmi[15].as_mv.first.as_int;
-      mbmi->mv[1].as_int = mi->bmi[15].as_mv.second.as_int;
+      mv->as_int = mi->bmi[15].as_mv[0].as_int;
+      mbmi->mv[1].as_int = mi->bmi[15].as_mv[1].as_int;

      break;  /* done with SPLITMV */
...
...
vp9/encoder/vp9_mcomp.c
...
...
@@ -1546,7 +1546,7 @@ int vp9_full_search_sad_c(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
   int in_what_stride = d->pre_stride;
   int mv_stride = d->pre_stride;
   uint8_t *bestaddress;
-  int_mv *best_mv = &d->bmi.as_mv.first;
+  int_mv *best_mv = &d->bmi.as_mv[0];
   int_mv this_mv;
   int bestsad = INT_MAX;
   int r, c;
...
...
@@ -1641,7 +1641,7 @@ int vp9_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
   int in_what_stride = d->pre_stride;
   int mv_stride = d->pre_stride;
   uint8_t *bestaddress;
-  int_mv *best_mv = &d->bmi.as_mv.first;
+  int_mv *best_mv = &d->bmi.as_mv[0];
   int_mv this_mv;
   unsigned int bestsad = INT_MAX;
   int r, c;
...
...
@@ -1770,7 +1770,7 @@ int vp9_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, int_mv *ref_mv,
   int in_what_stride = d->pre_stride;
   int mv_stride = d->pre_stride;
   uint8_t *bestaddress;
-  int_mv *best_mv = &d->bmi.as_mv.first;
+  int_mv *best_mv = &d->bmi.as_mv[0];
   int_mv this_mv;
   unsigned int bestsad = INT_MAX;
   int r, c;
...
...
vp9/encoder/vp9_rdopt.c
...
...
@@ -2166,17 +2166,17 @@ static int labels2mode(
       }
       break;
     case LEFT4X4:
-      this_mv->as_int = col ? d[-1].bmi.as_mv.first.as_int :
+      this_mv->as_int = col ? d[-1].bmi.as_mv[0].as_int :
                         left_block_mv(xd, mic, i);
       if (mbmi->second_ref_frame > 0)
-        this_second_mv->as_int = col ? d[-1].bmi.as_mv.second.as_int :
+        this_second_mv->as_int = col ? d[-1].bmi.as_mv[1].as_int :
                                  left_block_second_mv(xd, mic, i);
       break;
     case ABOVE4X4:
-      this_mv->as_int = row ? d[-4].bmi.as_mv.first.as_int :
+      this_mv->as_int = row ? d[-4].bmi.as_mv[0].as_int :
                         above_block_mv(mic, i, mis);
       if (mbmi->second_ref_frame > 0)
-        this_second_mv->as_int = row ? d[-4].bmi.as_mv.second.as_int :
+        this_second_mv->as_int = row ? d[-4].bmi.as_mv[1].as_int :
                                  above_block_second_mv(mic, i, mis);
       break;
     case ZERO4X4:
...
...
@@ -2192,10 +2192,10 @@ static int labels2mode(
       int_mv left_mv, left_second_mv;

       left_second_mv.as_int = 0;
-      left_mv.as_int = col ? d[-1].bmi.as_mv.first.as_int :
+      left_mv.as_int = col ? d[-1].bmi.as_mv[0].as_int :
                        left_block_mv(xd, mic, i);
       if (mbmi->second_ref_frame > 0)
-        left_second_mv.as_int = col ? d[-1].bmi.as_mv.second.as_int :
+        left_second_mv.as_int = col ? d[-1].bmi.as_mv[1].as_int :
                                 left_block_second_mv(xd, mic, i);

       if (left_mv.as_int == this_mv->as_int &&
...
...
@@ -2212,9 +2212,9 @@ static int labels2mode(
 #endif
     }

-    d->bmi.as_mv.first.as_int = this_mv->as_int;
+    d->bmi.as_mv[0].as_int = this_mv->as_int;
     if (mbmi->second_ref_frame > 0)
-      d->bmi.as_mv.second.as_int = this_second_mv->as_int;
+      d->bmi.as_mv[1].as_int = this_second_mv->as_int;

     x->partition_info->bmi[i].mode = m;
     x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
...
...
@@ -2500,9 +2500,9 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
         // use previous block's result as next block's MV predictor.
         if (segmentation == PARTITIONING_4X4 && i > 0) {
-          bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv.first.as_int;
+          bsi->mvp.as_int = x->e_mbd.block[i - 1].bmi.as_mv[0].as_int;
           if (i == 4 || i == 8 || i == 12)
-            bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv.first.as_int;
+            bsi->mvp.as_int = x->e_mbd.block[i - 4].bmi.as_mv[0].as_int;
           step_param = 2;
         }
       }
...
...
@@ -2541,11 +2541,11 @@ static void rd_check_segment_txsize(VP9_COMP *cpi, MACROBLOCK *x,
           if (thissme < bestsme) {
             bestsme = thissme;
-            mode_mv[NEW4X4].as_int = e->bmi.as_mv.first.as_int;
+            mode_mv[NEW4X4].as_int = e->bmi.as_mv[0].as_int;
           } else {
             /* The full search result is actually worse so re-instate the
              * previous best vector */
-            e->bmi.as_mv.first.as_int = mode_mv[NEW4X4].as_int;
+            e->bmi.as_mv[0].as_int = mode_mv[NEW4X4].as_int;
           }
         }
       }
...
...
@@ -2885,9 +2885,9 @@ static int rd_pick_best_mbsegmentation(VP9_COMP *cpi, MACROBLOCK *x,
   for (i = 0; i < 16; i++) {
     BLOCKD *bd = &x->e_mbd.block[i];

-    bd->bmi.as_mv.first.as_int = bsi.mvs[i].as_int;
+    bd->bmi.as_mv[0].as_int = bsi.mvs[i].as_int;
     if (mbmi->second_ref_frame > 0)
-      bd->bmi.as_mv.second.as_int = bsi.second_mvs[i].as_int;
+      bd->bmi.as_mv[1].as_int = bsi.second_mvs[i].as_int;

     bd->eob = bsi.eobs[i];
   }
...
...
@@ -3307,8 +3307,8 @@ static int64_t handle_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
                                    x->nmvjointcost, x->mvcost, &dis, &sse);
     }

-    d->bmi.as_mv.first.as_int = tmp_mv.as_int;
-    frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv.first.as_int;
+    d->bmi.as_mv[0].as_int = tmp_mv.as_int;
+    frame_mv[NEWMV][refs[0]].as_int = d->bmi.as_mv[0].as_int;

     // Add the new motion vector cost to our rolling cost variable
     *rate2 += vp9_mv_bit_cost(&tmp_mv, &ref_mv[0],
...
...
@@ -4251,10 +4251,12 @@ static void rd_pick_inter_mode(VP9_COMP *cpi, MACROBLOCK *x,
     if (best_mbmode.mode == SPLITMV) {
       for (i = 0; i < 16; i++)
-        xd->mode_info_context->bmi[i].as_mv.first.as_int = best_bmodes[i].as_mv.first.as_int;
+        xd->mode_info_context->bmi[i].as_mv[0].as_int =
+            best_bmodes[i].as_mv[0].as_int;
       if (mbmi->second_ref_frame > 0)
         for (i = 0; i < 16; i++)
-          xd->mode_info_context->bmi[i].as_mv.second.as_int = best_bmodes[i].as_mv.second.as_int;
+          xd->mode_info_context->bmi[i].as_mv[1].as_int =
+              best_bmodes[i].as_mv[1].as_int;

       vpx_memcpy(x->partition_info, &best_partition, sizeof(PARTITION_INFO));
...
...
vp9/encoder/vp9_temporal_filter.c
...
...
@@ -171,7 +171,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
   /*cpi->sf.search_method == HEX*/
   // TODO Check that the 16x16 vf & sdf are selected here
   // Ignore mv costing by sending NULL pointer instead of cost arrays
-  bestsme = vp9_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv.first,
+  bestsme = vp9_hex_search(x, b, d, &best_ref_mv1_full, &d->bmi.as_mv[0],
                            step_param, sadpb, &cpi->fn_ptr[BLOCK_16X16],
                            NULL, NULL, NULL, NULL, &best_ref_mv1);
...
...
@@ -183,7 +183,7 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
     int distortion;
     unsigned int sse;
     // Ignore mv costing by sending NULL pointer instead of cost array
-    bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv.first,
+    bestsme = cpi->find_fractional_mv_step(x, b, d, &d->bmi.as_mv[0],
                                            &best_ref_mv1, x->errorperbit,
                                            &cpi->fn_ptr[BLOCK_16X16],
...
...
@@ -263,8 +263,8 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
       if (cpi->frames[frame] == NULL)
        continue;

-      mbd->block[0].bmi.as_mv.first.as_mv.row = 0;
-      mbd->block[0].bmi.as_mv.first.as_mv.col = 0;
+      mbd->block[0].bmi.as_mv[0].as_mv.row = 0;
+      mbd->block[0].bmi.as_mv[0].as_mv.col = 0;

       if (frame == alt_ref_index) {