Xiph.Org / aom-rav1e

Commit ccd6f7ed, authored Apr 28, 2011 by Scott LaVarnway

Consolidated build inter predictors

Code cleanup.

Change-Id: Ic8b0167851116c64ddf08e8a3d302fb09ab61146

parent 2e102855
5 changed files
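In outline: the skip-path duplicate vp8_build_inter16x16_predictors_mb_s() is deleted, vp8_build_inter16x16_predictors_mb() gains explicit destination pointers and strides, the SPLITMV path moves into a new vp8_build_inter4x4_predictors_mb(), and vp8_build_inter_predictors_mb() becomes a thin dispatcher. A minimal sketch of the resulting call pattern, assembled from the diffs below (no API beyond what the commit itself introduces):

    /* Consolidated entry point (vp8/common/reconinter.h): the caller now
       chooses where the prediction lands. */
    extern void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
                                                   unsigned char *dst_y,
                                                   unsigned char *dst_u,
                                                   unsigned char *dst_v,
                                                   int dst_ystride,
                                                   int dst_uvstride);

    /* Normal reconstruction: predict into the per-macroblock scratch
       buffer (Y at offset 0, U at 256, V at 320; strides 16 and 8). */
    vp8_build_inter16x16_predictors_mb(x, x->predictor, &x->predictor[256],
                                       &x->predictor[320], 16, 8);

    /* Skipped macroblocks (decoder, threaded decoder, encoder): predict
       straight into the output frame, replacing the removed _s variant. */
    vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
                                       xd->dst.v_buffer, xd->dst.y_stride,
                                       xd->dst.uv_stride);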
vp8/common/reconinter.c

...
@@ -279,100 +279,111 @@ void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x)
     }
 }
 
-void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
-{
-    if (x->mode_info_context->mbmi.mode != SPLITMV)
-    {
-        int offset;
-        unsigned char *ptr_base;
-        unsigned char *ptr;
-        unsigned char *uptr, *vptr;
-
-        unsigned char *pred_ptr = x->predictor;
-        unsigned char *upred_ptr = &x->predictor[256];
-        unsigned char *vpred_ptr = &x->predictor[320];
-
-        int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
-        int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
-        int pre_stride = x->block[0].pre_stride;
-
-        ptr_base = x->pre.y_buffer;
-        ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
-
-        if ((mv_row | mv_col) & 7)
-        {
-            x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, pred_ptr, 16);
-        }
-        else
-        {
-            RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, pred_ptr, 16);
-        }
-
-        mv_row = x->block[16].bmi.mv.as_mv.row;
-        mv_col = x->block[16].bmi.mv.as_mv.col;
-        pre_stride >>= 1;
-        offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
-        uptr = x->pre.u_buffer + offset;
-        vptr = x->pre.v_buffer + offset;
-
-        if ((mv_row | mv_col) & 7)
-        {
-            x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, upred_ptr, 8);
-            x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vpred_ptr, 8);
-        }
-        else
-        {
-            RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, upred_ptr, 8);
-            RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vpred_ptr, 8);
-        }
-    }
-    else
-    {
-        int i;
-
-        if (x->mode_info_context->mbmi.partitioning < 3)
-        {
-            for (i = 0; i < 4; i++)
-            {
-                BLOCKD *d = &x->block[bbb[i]];
-                build_inter_predictors4b(x, d, 16);
-            }
-        }
-        else
-        {
-            for (i = 0; i < 16; i += 2)
-            {
-                BLOCKD *d0 = &x->block[i];
-                BLOCKD *d1 = &x->block[i+1];
-
-                if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-                    build_inter_predictors2b(x, d0, 16);
-                else
-                {
-                    vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
-                    vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
-                }
-            }
-        }
-
-        for (i = 16; i < 24; i += 2)
-        {
-            BLOCKD *d0 = &x->block[i];
-            BLOCKD *d1 = &x->block[i+1];
-
-            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
-                build_inter_predictors2b(x, d0, 8);
-            else
-            {
-                vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
-                vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
-            }
-        }
-    }
-}
+void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
+                                        unsigned char *dst_y,
+                                        unsigned char *dst_u,
+                                        unsigned char *dst_v,
+                                        int dst_ystride,
+                                        int dst_uvstride)
+{
+    int offset;
+    unsigned char *ptr;
+    unsigned char *uptr, *vptr;
+
+    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
+    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
+
+    unsigned char *ptr_base = x->pre.y_buffer;
+    int pre_stride = x->block[0].pre_stride;
+
+    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
+
+    if ((mv_row | mv_col) & 7)
+    {
+        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_y, dst_ystride);
+    }
+    else
+    {
+        RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_y, dst_ystride);
+    }
+
+    mv_row = x->block[16].bmi.mv.as_mv.row;
+    mv_col = x->block[16].bmi.mv.as_mv.col;
+    pre_stride >>= 1;
+    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
+    uptr = x->pre.u_buffer + offset;
+    vptr = x->pre.v_buffer + offset;
+
+    if ((mv_row | mv_col) & 7)
+    {
+        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, dst_u, dst_uvstride);
+        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, dst_v, dst_uvstride);
+    }
+    else
+    {
+        RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, dst_u, dst_uvstride);
+        RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, dst_v, dst_uvstride);
+    }
+}
+
+void vp8_build_inter4x4_predictors_mb(MACROBLOCKD *x)
+{
+    int i;
+
+    if (x->mode_info_context->mbmi.partitioning < 3)
+    {
+        for (i = 0; i < 4; i++)
+        {
+            BLOCKD *d = &x->block[bbb[i]];
+            build_inter_predictors4b(x, d, 16);
+        }
+    }
+    else
+    {
+        for (i = 0; i < 16; i += 2)
+        {
+            BLOCKD *d0 = &x->block[i];
+            BLOCKD *d1 = &x->block[i+1];
+
+            if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+                build_inter_predictors2b(x, d0, 16);
+            else
+            {
+                vp8_build_inter_predictors_b(d0, 16, x->subpixel_predict);
+                vp8_build_inter_predictors_b(d1, 16, x->subpixel_predict);
+            }
+        }
+    }
+
+    for (i = 16; i < 24; i += 2)
+    {
+        BLOCKD *d0 = &x->block[i];
+        BLOCKD *d1 = &x->block[i+1];
+
+        if (d0->bmi.mv.as_int == d1->bmi.mv.as_int)
+            build_inter_predictors2b(x, d0, 8);
+        else
+        {
+            vp8_build_inter_predictors_b(d0, 8, x->subpixel_predict);
+            vp8_build_inter_predictors_b(d1, 8, x->subpixel_predict);
+        }
+    }
+}
+
+void vp8_build_inter_predictors_mb(MACROBLOCKD *x)
+{
+    if (x->mode_info_context->mbmi.mode != SPLITMV)
+    {
+        vp8_build_inter16x16_predictors_mb(x, x->predictor,
+                                           &x->predictor[256],
+                                           &x->predictor[320], 16, 8);
+    }
+    else
+    {
+        vp8_build_inter4x4_predictors_mb(x);
+    }
+}
 
 void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)

...
@@ -455,91 +466,5 @@ void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel)
 }
 
-/* The following functions are wriiten for skip_recon_mb() to call. Since there is no recon in this
- * situation, we can write the result directly to dst buffer instead of writing it to predictor
- * buffer and then copying it to dst buffer.
- */
-static void vp8_build_inter_predictors_b_s(BLOCKD *d, unsigned char *dst_ptr, vp8_subpix_fn_t sppf)
-{
-    int r;
-    unsigned char *ptr_base;
-    unsigned char *ptr;
-    /*unsigned char *pred_ptr = d->predictor;*/
-    int dst_stride = d->dst_stride;
-    int pre_stride = d->pre_stride;
-
-    ptr_base = *(d->base_pre);
-
-    if (d->bmi.mv.as_mv.row & 7 || d->bmi.mv.as_mv.col & 7)
-    {
-        ptr = ptr_base + d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        sppf(ptr, pre_stride, d->bmi.mv.as_mv.col & 7, d->bmi.mv.as_mv.row & 7, dst_ptr, dst_stride);
-    }
-    else
-    {
-        ptr_base += d->pre + (d->bmi.mv.as_mv.row >> 3) * d->pre_stride + (d->bmi.mv.as_mv.col >> 3);
-        ptr = ptr_base;
-
-        for (r = 0; r < 4; r++)
-        {
-#ifdef MUST_BE_ALIGNED
-            dst_ptr[0] = ptr[0];
-            dst_ptr[1] = ptr[1];
-            dst_ptr[2] = ptr[2];
-            dst_ptr[3] = ptr[3];
-#else
-            *(int *)dst_ptr = *(int *)ptr;
-#endif
-            dst_ptr += dst_stride;
-            ptr     += pre_stride;
-        }
-    }
-}
-
-void vp8_build_inter16x16_predictors_mb_s(MACROBLOCKD *x)
-{
-    unsigned char *dst_ptr = x->dst.y_buffer;
-    int offset;
-    unsigned char *ptr_base;
-    unsigned char *ptr;
-    unsigned char *uptr, *vptr;
-    unsigned char *udst_ptr = x->dst.u_buffer;
-    unsigned char *vdst_ptr = x->dst.v_buffer;
-
-    int mv_row = x->mode_info_context->mbmi.mv.as_mv.row;
-    int mv_col = x->mode_info_context->mbmi.mv.as_mv.col;
-    int pre_stride = x->dst.y_stride; /*x->block[0].pre_stride;*/
-
-    ptr_base = x->pre.y_buffer;
-    ptr = ptr_base + (mv_row >> 3) * pre_stride + (mv_col >> 3);
-
-    if ((mv_row | mv_col) & 7)
-    {
-        x->subpixel_predict16x16(ptr, pre_stride, mv_col & 7, mv_row & 7, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
-    }
-    else
-    {
-        RECON_INVOKE(&x->rtcd->recon, copy16x16)(ptr, pre_stride, dst_ptr, x->dst.y_stride); /*x->block[0].dst_stride);*/
-    }
-
-    mv_row = x->block[16].bmi.mv.as_mv.row;
-    mv_col = x->block[16].bmi.mv.as_mv.col;
-    pre_stride >>= 1;
-    offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
-    uptr = x->pre.u_buffer + offset;
-    vptr = x->pre.v_buffer + offset;
-
-    if ((mv_row | mv_col) & 7)
-    {
-        x->subpixel_predict8x8(uptr, pre_stride, mv_col & 7, mv_row & 7, udst_ptr, x->dst.uv_stride);
-        x->subpixel_predict8x8(vptr, pre_stride, mv_col & 7, mv_row & 7, vdst_ptr, x->dst.uv_stride);
-    }
-    else
-    {
-        RECON_INVOKE(&x->rtcd->recon, copy8x8)(uptr, pre_stride, udst_ptr, x->dst.uv_stride);
-        RECON_INVOKE(&x->rtcd->recon, copy8x8)(vptr, pre_stride, vdst_ptr, x->dst.uv_stride);
-    }
-}
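A note on the motion-vector arithmetic that recurs above: libvpx stores these motion vectors in 1/8-pel units, so mv >> 3 is the integer-pel offset into the reference plane, mv & 7 is the fractional phase handed to the subpixel filter, and (mv_row | mv_col) & 7 is nonzero exactly when either component has a fractional part. A self-contained illustration with made-up values (only the bit arithmetic comes from the source):

    #include <stdio.h>

    int main(void)
    {
        int mv_row = 13, mv_col = 16; /* hypothetical: 1.625 and 2.0 pels */
        int pre_stride = 32;          /* hypothetical reference row stride */

        /* The integer-pel part selects the reference pixel to start from... */
        int offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
        /* ...and the low three bits decide subpixel filter vs. plain copy. */
        int frac = (mv_row | mv_col) & 7;

        printf("offset=%d xphase=%d yphase=%d %s\n", offset,
               mv_col & 7, mv_row & 7, frac ? "subpixel filter" : "copy");
        return 0;
    }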
vp8/common/reconinter.h

...

@@ -13,7 +13,13 @@
 #define __INC_RECONINTER_H
 
 extern void vp8_build_inter_predictors_mb(MACROBLOCKD *x);
-extern void vp8_build_inter16x16_predictors_mb_s(MACROBLOCKD *x);
+extern void vp8_build_inter16x16_predictors_mb(MACROBLOCKD *x,
+                                               unsigned char *dst_y,
+                                               unsigned char *dst_u,
+                                               unsigned char *dst_v,
+                                               int dst_ystride,
+                                               int dst_uvstride);
 extern void vp8_build_inter16x16_predictors_mby(MACROBLOCKD *x);
 extern void vp8_build_uvmvs(MACROBLOCKD *x, int fullpixel);

...
vp8/decoder/decodframe.c

...
@@ -119,7 +119,9 @@ static void skip_recon_mb(VP8D_COMP *pbi, MACROBLOCKD *xd)
     }
     else
     {
-        vp8_build_inter16x16_predictors_mb_s(xd);
+        vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
+                                           xd->dst.u_buffer, xd->dst.v_buffer,
+                                           xd->dst.y_stride, xd->dst.uv_stride);
     }
 }

...
@@ -221,6 +223,9 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
                          build_intra_predictors_mby)(xd);
         }
+        else
+        {
+            vp8_intra_prediction_down_copy(xd);
+        }
     }
     else

...
@@ -232,6 +237,7 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd)
     if (xd->mode_info_context->mbmi.mode != B_PRED && xd->mode_info_context->mbmi.mode != SPLITMV)
     {
         BLOCKD *b = &xd->block[24];
         DEQUANT_INVOKE(&pbi->dequant, block)(b);
 
         /* do 2nd order transform on the dc block */

...
vp8/decoder/threading.c

...
@@ -118,14 +118,16 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col)
         xd->mode_info_context->mbmi.mb_skip_coeff = 1;
 
         /*mt_skip_recon_mb(pbi, xd, mb_row, mb_col);*/
-        if (xd->frame_type == KEY_FRAME || xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
+        if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
         {
             vp8mt_build_intra_predictors_mbuv_s(pbi, xd, mb_row, mb_col);
             vp8mt_build_intra_predictors_mby_s(pbi, xd, mb_row, mb_col);
         }
         else
         {
-            vp8_build_inter16x16_predictors_mb_s(xd);
+            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer, xd->dst.u_buffer,
+                                               xd->dst.v_buffer, xd->dst.y_stride,
+                                               xd->dst.uv_stride);
         }
         return;
     }

...
vp8/encoder/encodeframe.c

...
@@ -1391,7 +1391,10 @@ int vp8cx_encode_inter_macroblock
         }
         else
-            vp8_build_inter16x16_predictors_mb_s(xd);
+            vp8_build_inter16x16_predictors_mb(xd, xd->dst.y_buffer,
+                                               xd->dst.u_buffer,
+                                               xd->dst.v_buffer,
+                                               xd->dst.y_stride, xd->dst.uv_stride);
     }
 
     if (!x->skip)
...
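For reference, the 256/320 offsets and 16/8 strides passed by the new wrapper follow from the layout of the MACROBLOCKD scratch predictor implied by the call sites: a 16x16 Y prediction occupies bytes 0..255 at stride 16, the 8x8 U prediction occupies bytes 256..319 at stride 8, and the 8x8 V prediction starts at 256 + 64 = 320. A sketch of that layout (offsets from the diff; the comments are interpretation):

    unsigned char *pred_y = x->predictor;        /* 16*16 = 256 bytes, stride 16 */
    unsigned char *pred_u = &x->predictor[256];  /*  8*8  =  64 bytes, stride 8  */
    unsigned char *pred_v = &x->predictor[320];  /*  8*8  =  64 bytes, stride 8  */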