Xiph.Org / aom-rav1e · Commit 48f51257

Authored Jun 14, 2016 by Jingning Han; committed by Gerrit Code Review, Jun 14, 2016

Merge "Fix enc/dec mismatch in non-420 settings" into nextgenv2

Parents: a116ab70, a4ea8fd8
Changes: 1 file

vp10/common/reconinter.c @ 48f51257
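Background for the commit title: "non-420" means chroma subsampling other than 4:2:0, where the chroma sub-blocks of a sub-8x8 partition no longer form a 2x2 grid. A minimal standalone sketch (illustrative only; not code from this repository) of the blk_num arithmetic the removed code relied on:

#include <stdio.h>

/* blk_num = 1 << (subsampling_x + subsampling_y), as in the removed code:
 * 4:2:0 -> ssx = 1, ssy = 1 -> 4 sub-blocks (a 2x2 grid)
 * 4:2:2 -> ssx = 1, ssy = 0 -> 2 sub-blocks
 * 4:4:4 -> ssx = 0, ssy = 0 -> 1 sub-block */
int main(void) {
  const char *fmt[] = { "4:2:0", "4:2:2", "4:4:4" };
  const int ssx[] = { 1, 1, 0 }, ssy[] = { 1, 0, 0 };
  for (int i = 0; i < 3; ++i)
    printf("%s: blk_num = %d\n", fmt[i], 1 << (ssx[i] + ssy[i]));
  return 0;
}

Only in 4:2:0 does blk_num match the four 4x4 units of an 8x8 luma block; the (chr_idx & 1, chr_idx >> 1) coordinate mapping in the removed lines below implicitly assumes that 2x2 layout.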
@@ -706,66 +706,79 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane,
 #if CONFIG_DUAL_FILTER
   if (mi->mbmi.sb_type < BLOCK_8X8 && plane > 0) {
-    int blk_num = 1 << (pd->subsampling_x + pd->subsampling_y);
-    int chr_idx;
-    int x_base = x;
-    int y_base = y;
-    int x_step = w >> pd->subsampling_x;
-    int y_step = h >> pd->subsampling_y;
-
-    for (chr_idx = 0; chr_idx < blk_num; ++chr_idx) {
-      for (ref = 0; ref < 1 + is_compound; ++ref) {
-        const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
-        struct buf_2d *const pre_buf = &pd->pre[ref];
-        struct buf_2d *const dst_buf = &pd->dst;
-        uint8_t *dst = dst_buf->buf;
-        const MV mv = mi->bmi[chr_idx].as_mv[ref].as_mv;
-        const MV mv_q4 = clamp_mv_to_umv_border_sb(
-            xd, &mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
-        uint8_t *pre;
-        MV32 scaled_mv;
-        int xs, ys, subpel_x, subpel_y;
-        const int is_scaled = vp10_is_scaled(sf);
-
-        x = x_base + (chr_idx & 0x01) * x_step;
-        y = y_base + (chr_idx >> 1) * y_step;
-        dst += dst_buf->stride * y + x;
-
-        if (is_scaled) {
-          pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-          scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
-          xs = sf->x_step_q4;
-          ys = sf->y_step_q4;
-        } else {
-          pre = pre_buf->buf + y * pre_buf->stride + x;
-          scaled_mv.row = mv_q4.row;
-          scaled_mv.col = mv_q4.col;
-          xs = ys = 16;
-        }
-
-        subpel_x = scaled_mv.col & SUBPEL_MASK;
-        subpel_y = scaled_mv.row & SUBPEL_MASK;
-        pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
-               (scaled_mv.col >> SUBPEL_BITS);
-
-#if CONFIG_EXT_INTER
-        if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
-            mi->mbmi.use_wedge_interinter)
-          vp10_make_masked_inter_predictor(
-              pre, pre_buf->stride, dst, dst_buf->stride, subpel_x, subpel_y,
-              sf, w, h, mi->mbmi.interp_filter, xs, ys,
-#if CONFIG_SUPERTX
-              wedge_offset_x, wedge_offset_y,
-#endif  // CONFIG_SUPERTX
-              xd);
-        else
-#endif  // CONFIG_EXT_INTER
-          vp10_make_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride,
-                                    subpel_x, subpel_y, sf, x_step, y_step,
-                                    ref, mi->mbmi.interp_filter, xs, ys, xd);
+    // block size in log2
+    const int b4_wl = b_width_log2_lookup[mi->mbmi.sb_type];
+    const int b4_hl = b_height_log2_lookup[mi->mbmi.sb_type];
+    const int b8_sl = b_width_log2_lookup[BLOCK_8X8];
+    // block size
+    const int b4_w = 1 << b4_wl;
+    const int b4_h = 1 << b4_hl;
+    const int b8_s = 1 << b8_sl;
+    int idx, idy;
+    const int x_base = x;
+    const int y_base = y;
+    // processing unit size
+    const int x_step = w >> (b8_sl - b4_wl);
+    const int y_step = h >> (b8_sl - b4_hl);
+
+    for (idy = 0; idy < b8_s; idy += b4_h) {
+      for (idx = 0; idx < b8_s; idx += b4_w) {
+        const int chr_idx = (idy * 2) + idx;
+        for (ref = 0; ref < 1 + is_compound; ++ref) {
+          const struct scale_factors *const sf = &xd->block_refs[ref]->sf;
+          struct buf_2d *const pre_buf = &pd->pre[ref];
+          struct buf_2d *const dst_buf = &pd->dst;
+          uint8_t *dst = dst_buf->buf;
+          const MV mv = mi->bmi[chr_idx].as_mv[ref].as_mv;
+          const MV mv_q4 = clamp_mv_to_umv_border_sb(
+              xd, &mv, bw, bh, pd->subsampling_x, pd->subsampling_y);
+          uint8_t *pre;
+          MV32 scaled_mv;
+          int xs, ys, subpel_x, subpel_y;
+          const int is_scaled = vp10_is_scaled(sf);
+
+          x = x_base + idx * x_step;
+          y = y_base + idy * y_step;
+          dst += dst_buf->stride * y + x;
+
+          if (is_scaled) {
+            pre = pre_buf->buf +
+                  scaled_buffer_offset(x, y, pre_buf->stride, sf);
+            scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+            xs = sf->x_step_q4;
+            ys = sf->y_step_q4;
+          } else {
+            pre = pre_buf->buf + y * pre_buf->stride + x;
+            scaled_mv.row = mv_q4.row;
+            scaled_mv.col = mv_q4.col;
+            xs = ys = 16;
+          }
+
+          subpel_x = scaled_mv.col & SUBPEL_MASK;
+          subpel_y = scaled_mv.row & SUBPEL_MASK;
+          pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
+                 (scaled_mv.col >> SUBPEL_BITS);
+
+#if CONFIG_EXT_INTER
+          if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
+              mi->mbmi.use_wedge_interinter)
+            vp10_make_masked_inter_predictor(
+                pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
+                subpel_y, sf, w, h, mi->mbmi.interp_filter, xs, ys,
+#if CONFIG_SUPERTX
+                wedge_offset_x, wedge_offset_y,
+#endif  // CONFIG_SUPERTX
+                xd);
+          else
+#endif  // CONFIG_EXT_INTER
+            vp10_make_inter_predictor(pre, pre_buf->stride, dst,
+                                      dst_buf->stride, subpel_x, subpel_y,
+                                      sf, x_step, y_step, ref,
+                                      mi->mbmi.interp_filter, xs, ys, xd);
+        }
       }
     }
     return;
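To make the behavioral change concrete, here is a standalone sketch (the helper names old_scheme/new_scheme are ours, not the library's) that replays both index mappings for a BLOCK_4X4 partition: an 8x8 luma area holding four 4x4 units with per-unit motion vectors in bmi[0..3]. In 4:2:0 the two schemes produce the same four chroma sub-blocks; in 4:2:2 the removed loop issues only two predictions from bmi[0] and bmi[1], so the bottom-row motion vectors never reach the chroma predictor, while the added loop always walks all four units.

#include <stdio.h>

/* Standalone illustration (not the library's API). For BLOCK_4X4 the
 * luma area is 8x8; the chroma plane block is w = 8 >> ssx, h = 8 >> ssy. */

/* Removed scheme: sub-block count driven by the subsampling shifts. */
static void old_scheme(int ssx, int ssy) {
  const int w = 8 >> ssx, h = 8 >> ssy;
  const int blk_num = 1 << (ssx + ssy);
  const int x_step = w >> ssx, y_step = h >> ssy;
  for (int chr_idx = 0; chr_idx < blk_num; ++chr_idx)
    printf("  old: bmi[%d] -> offset (%d,%d), size %dx%d\n", chr_idx,
           (chr_idx & 0x01) * x_step, (chr_idx >> 1) * y_step, x_step, y_step);
}

/* Added scheme: walk the 8x8 area in 4x4 units regardless of subsampling.
 * For BLOCK_4X4, b4_wl = b4_hl = 0 and b8_sl = 1, so b8_s = 2 and the
 * processing unit is x_step = w >> 1, y_step = h >> 1. */
static void new_scheme(int ssx, int ssy) {
  const int w = 8 >> ssx, h = 8 >> ssy;
  const int x_step = w >> 1, y_step = h >> 1;
  for (int idy = 0; idy < 2; ++idy)
    for (int idx = 0; idx < 2; ++idx)
      printf("  new: bmi[%d] -> offset (%d,%d), size %dx%d\n", idy * 2 + idx,
             idx * x_step, idy * y_step, x_step, y_step);
}

int main(void) {
  printf("4:2:0 (both schemes cover all four bmi entries):\n");
  old_scheme(1, 1);
  new_scheme(1, 1);
  printf("4:2:2 (old scheme skips bmi[2] and bmi[3]):\n");
  old_scheme(1, 0);
  new_scheme(1, 0);
  return 0;
}

Note that the added loop's step sizes follow the partition shape (b4_wl/b4_hl) rather than the subsampling shifts, so the same scheme extends to BLOCK_4X8 and BLOCK_8X4 partitions in every chroma format.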