Xiph.Org / aom-rav1e · Commits

Commit 93ec31df
Authored Oct 24, 2013 by Yunqing Wang; committed by Gerrit Code Review, Oct 24, 2013

Merge "Improve scale_factors struct"

Parents: eec622d1, 175c313a
Changes: 8 files
vp9/common/vp9_onyxc_int.h

@@ -115,6 +115,7 @@ typedef struct VP9Common {
   // Each frame can reference ALLOWED_REFS_PER_FRAME buffers
   int active_ref_idx[ALLOWED_REFS_PER_FRAME];
   struct scale_factors active_ref_scale[ALLOWED_REFS_PER_FRAME];
+  struct scale_factors_common active_ref_scale_comm[ALLOWED_REFS_PER_FRAME];
   int new_fb_idx;

   YV12_BUFFER_CONFIG post_proc_buffer;
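This first hunk already shows the theme of the whole commit: the old monolithic scale_factors (fixed-point factors, function pointers, and the convolve dispatch table, one full copy per reference) is split into a shared scale_factors_common plus a small per-reference scale_factors that keeps only the block offsets. A standalone sketch of why that shrinks per-reference state — the types below are simplified stand-ins, not the real libvpx definitions:

    #include <stdio.h>

    /* Simplified stand-ins for the before/after layouts. The function
     * pointers and the 2x2x2 predict table dominate the old struct;
     * after the split each reference keeps only two ints and a pointer
     * to the shared part. */
    typedef void (*convolve_fn_t)(void);

    struct old_scale_factors {            /* one full copy per reference */
      int x_scale_fp, y_scale_fp;
      int x_offset_q4, x_step_q4;
      int y_offset_q4, y_step_q4;
      int (*scale_value_x)(int);
      int (*scale_value_y)(int);
      void (*set_scaled_offsets)(void);
      convolve_fn_t predict[2][2][2];
    };

    struct sfc_sketch {                   /* shared: one per reference frame */
      int x_scale_fp, y_scale_fp;
      int x_step_q4, y_step_q4;
      int (*scale_value_x)(int);
      int (*scale_value_y)(int);
      void (*set_scaled_offsets)(void);
      convolve_fn_t predict[2][2][2];
    };

    struct sf_sketch {                    /* per reference: offsets + pointer */
      int x_offset_q4, y_offset_q4;
      const struct sfc_sketch *sfc;
    };

    int main(void) {
      printf("per-reference before: %zu bytes\n", sizeof(struct old_scale_factors));
      printf("per-reference after:  %zu bytes\n", sizeof(struct sf_sketch));
      return 0;
    }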
vp9/common/vp9_reconinter.c

@@ -40,6 +40,24 @@ void vp9_setup_interp_filters(MACROBLOCKD *xd,
   assert(((intptr_t)xd->subpix.filter_x & 0xff) == 0);
 }

+static void inter_predictor(const uint8_t *src, int src_stride,
+                            uint8_t *dst, int dst_stride,
+                            const MV32 *mv,
+                            const struct scale_factors *scale,
+                            int w, int h, int ref,
+                            const struct subpix_fn_table *subpix,
+                            int xs, int ys) {
+  const int subpel_x = mv->col & SUBPEL_MASK;
+  const int subpel_y = mv->row & SUBPEL_MASK;
+
+  src += (mv->row >> SUBPEL_BITS) * src_stride + (mv->col >> SUBPEL_BITS);
+  scale->sfc->predict[subpel_x != 0][subpel_y != 0][ref](
+      src, src_stride, dst, dst_stride,
+      subpix->filter_x[subpel_x], xs,
+      subpix->filter_y[subpel_y], ys,
+      w, h);
+}
+
 void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
                                uint8_t *dst, int dst_stride,
                                const MV *src_mv,
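The new inter_predictor helper centralizes the convolve dispatch: predict[subpel_x != 0][subpel_y != 0][ref] selects a filter based on whether the motion vector has a fractional horizontal component, a fractional vertical component, and whether the result should be averaged into dst for compound prediction (the ref index). A standalone sketch of that indexing, using stand-in strings rather than the real convolve function pointers (SUBPEL_BITS is 4 in VP9, so SUBPEL_MASK is 15):

    #include <stdio.h>

    static void run(const char *name) { printf("selected: %s\n", name); }

    int main(void) {
      /* Stand-in names for the convolve variants; indexing mirrors
       * predict[subpel_x != 0][subpel_y != 0][ref] above. */
      const char *table[2][2][2] = {
        { { "copy",       "avg"            },   /* no subpel in x or y */
          { "8tap_vert",  "8tap_avg_vert"  } }, /* subpel in y only */
        { { "8tap_horiz", "8tap_avg_horiz" },   /* subpel in x only */
          { "8tap_2d",    "8tap_avg_2d"    } }, /* subpel in both */
      };
      const int SUBPEL_MASK = 15;            /* (1 << SUBPEL_BITS) - 1 */
      int mv_col = 33, mv_row = 16, ref = 0; /* example Q4 motion vector */
      int subpel_x = mv_col & SUBPEL_MASK;   /* 33 & 15 == 1 -> filter x */
      int subpel_y = mv_row & SUBPEL_MASK;   /* 16 & 15 == 0 -> no y filter */
      run(table[subpel_x != 0][subpel_y != 0][ref]);  /* prints 8tap_horiz */
      return 0;
    }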
@@ -50,16 +68,11 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = {is_q4 ? src_mv->row : src_mv->row * 2,
                     is_q4 ? src_mv->col : src_mv->col * 2};
-  const MV32 mv = scale->scale_mv(&mv_q4, scale);
-  const int subpel_x = mv.col & SUBPEL_MASK;
-  const int subpel_y = mv.row & SUBPEL_MASK;
+  const struct scale_factors_common *sfc = scale->sfc;
+  const MV32 mv = sfc->scale_mv(&mv_q4, scale);

-  src += (mv.row >> SUBPEL_BITS) * src_stride + (mv.col >> SUBPEL_BITS);
-  scale->predict[subpel_x != 0][subpel_y != 0][ref](
-      src, src_stride, dst, dst_stride,
-      subpix->filter_x[subpel_x], scale->x_step_q4,
-      subpix->filter_y[subpel_y], scale->y_step_q4,
-      w, h);
+  inter_predictor(src, src_stride, dst, dst_stride, &mv, scale,
+                  w, h, ref, subpix, sfc->x_step_q4, sfc->y_step_q4);
 }

 static INLINE int round_mv_comp_q4(int value) {
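The retained context above also shows the precision normalization: vp9_build_inter_predictor accepts eighth-pel (MV_PRECISION_Q3) or sixteenth-pel (MV_PRECISION_Q4) motion vectors and doubles Q3 components to put everything in Q4 before scaling; the integer pixel offset then comes from >> SUBPEL_BITS and the fractional phase from & SUBPEL_MASK. A small worked sketch (values illustrative):

    #include <stdio.h>

    /* SUBPEL_BITS == 4 in VP9, so Q4 holds sixteenth-pel positions. */
    enum { SUBPEL_BITS = 4, SUBPEL_MASK = (1 << SUBPEL_BITS) - 1 };

    int main(void) {
      int q3_row = 5;            /* 5/8 pel in Q3 */
      int q4_row = q3_row * 2;   /* 10/16 pel in Q4 */
      printf("integer pixels: %d, fractional phase: %d/16\n",
             q4_row >> SUBPEL_BITS, q4_row & SUBPEL_MASK);  /* 0 and 10 */
      return 0;
    }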
@@ -133,10 +146,6 @@ static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
   struct scale_factors *const scale = &xd->scale_factor[ref];
   struct buf_2d *const pre_buf = &pd->pre[ref];
   struct buf_2d *const dst_buf = &pd->dst;
-  const uint8_t *const pre = pre_buf->buf +
-      scaled_buffer_offset(x, y, pre_buf->stride, scale);
-
   uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;

   // TODO(jkoleszar): All chroma MVs in SPLITMV mode are taken as the
@@ -156,11 +165,29 @@ static void build_inter_predictors(int plane, int block, BLOCK_SIZE bsize,
                                pd->subsampling_x, pd->subsampling_y);

-    scale->set_scaled_offsets(scale, arg->y + y, arg->x + x);
-    vp9_build_inter_predictor(pre, pre_buf->stride,
-                              dst, dst_buf->stride,
-                              &res_mv, scale,
-                              4 << pred_w, 4 << pred_h, ref,
-                              &xd->subpix, MV_PRECISION_Q4);
+    uint8_t *pre;
+    // mv_precision precision is MV_PRECISION_Q4.
+    const MV mv_q4 = {res_mv.row, res_mv.col};
+    MV32 scaled_mv;
+    int xs, ys;
+
+    if (vp9_is_scaled(scale->sfc)) {
+      pre = pre_buf->buf +
+          scaled_buffer_offset(x, y, pre_buf->stride, scale);
+      scale->sfc->set_scaled_offsets(scale, arg->y + y, arg->x + x);
+      scaled_mv = scale->sfc->scale_mv(&mv_q4, scale);
+      xs = scale->sfc->x_step_q4;
+      ys = scale->sfc->y_step_q4;
+    } else {
+      pre = pre_buf->buf + (y * pre_buf->stride + x);
+      scaled_mv.row = mv_q4.row;
+      scaled_mv.col = mv_q4.col;
+      xs = ys = 16;
+    }
+
+    inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, &scaled_mv,
+                    scale, 4 << pred_w, 4 << pred_h, ref,
+                    &xd->subpix, xs, ys);
   }
 }
@@ -220,15 +247,17 @@ void vp9_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
 void vp9_setup_scale_factors(VP9_COMMON *cm, int i) {
   const int ref = cm->active_ref_idx[i];
   struct scale_factors *const sf = &cm->active_ref_scale[i];
+  struct scale_factors_common *const sfc = &cm->active_ref_scale_comm[i];
   if (ref >= NUM_YV12_BUFFERS) {
     vp9_zero(*sf);
+    vp9_zero(*sfc);
   } else {
     YV12_BUFFER_CONFIG *const fb = &cm->yv12_fb[ref];
-    vp9_setup_scale_factors_for_frame(sf,
+    vp9_setup_scale_factors_for_frame(sf, sfc,
                                       fb->y_crop_width, fb->y_crop_height,
                                       cm->width, cm->height);

-    if (vp9_is_scaled(sf))
+    if (vp9_is_scaled(sfc))
       vp9_extend_frame_borders(fb, cm->subsampling_x, cm->subsampling_y);
   }
 }
vp9/common/vp9_reconinter.h

@@ -38,8 +38,10 @@ void vp9_build_inter_predictor(const uint8_t *src, int src_stride,
 static int scaled_buffer_offset(int x_offset, int y_offset, int stride,
                                 const struct scale_factors *scale) {
-  const int x = scale ? scale->scale_value_x(x_offset, scale) : x_offset;
-  const int y = scale ? scale->scale_value_y(y_offset, scale) : y_offset;
+  const int x = scale ? scale->sfc->scale_value_x(x_offset, scale->sfc)
+                      : x_offset;
+  const int y = scale ? scale->sfc->scale_value_y(y_offset, scale->sfc)
+                      : y_offset;
   return y * stride + x;
 }
vp9/common/vp9_scale.c

@@ -12,23 +12,23 @@
 #include "vp9/common/vp9_filter.h"
 #include "vp9/common/vp9_scale.h"

-static INLINE int scaled_x(int val, const struct scale_factors *scale) {
-  return val * scale->x_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_x(int val, const struct scale_factors_common *sfc) {
+  return val * sfc->x_scale_fp >> REF_SCALE_SHIFT;
 }

-static INLINE int scaled_y(int val, const struct scale_factors *scale) {
-  return val * scale->y_scale_fp >> REF_SCALE_SHIFT;
+static INLINE int scaled_y(int val, const struct scale_factors_common *sfc) {
+  return val * sfc->y_scale_fp >> REF_SCALE_SHIFT;
 }

-static int unscaled_value(int val, const struct scale_factors *scale) {
-  (void) scale;
+static int unscaled_value(int val, const struct scale_factors_common *sfc) {
+  (void) sfc;
   return val;
 }

 static MV32 scaled_mv(const MV *mv, const struct scale_factors *scale) {
   const MV32 res = {
-    scaled_y(mv->row, scale) + scale->y_offset_q4,
-    scaled_x(mv->col, scale) + scale->x_offset_q4
+    scaled_y(mv->row, scale->sfc) + scale->y_offset_q4,
+    scaled_x(mv->col, scale->sfc) + scale->x_offset_q4
   };
   return res;
 }
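scaled_x() and scaled_y() are plain fixed-point multiplies: val * scale_fp >> REF_SCALE_SHIFT. Assuming REF_SCALE_SHIFT is 14 (consistent with REF_NO_SCALE == 1 << REF_SCALE_SHIFT in vp9_scale.h) and the usual (other << REF_SCALE_SHIFT) / this factor computation — neither the constant's value nor that formula appears in this excerpt, so treat both as assumptions — a 2:1 downscaled reference gives x_step_q4 = 32, i.e. the predictor advances two reference pixels per output pixel:

    #include <stdio.h>

    /* Worked example of the fixed-point scaling above. REF_SCALE_SHIFT
     * and the factor computation are assumed, not shown in this hunk. */
    enum { REF_SCALE_SHIFT = 14 };

    static int scaled(int val, int scale_fp) {
      return val * scale_fp >> REF_SCALE_SHIFT;
    }

    int main(void) {
      int other_w = 1920, this_w = 960;  /* 2:1 downscale */
      int x_scale_fp = (other_w << REF_SCALE_SHIFT) / this_w;  /* 32768 */
      printf("x_scale_fp = %d\n", x_scale_fp);
      /* 16 is one output pixel in Q4; it maps to 32 in the reference,
       * so the filter steps two source pixels per output pixel. */
      printf("x_step_q4 = %d\n", scaled(16, x_scale_fp));
      return 0;
    }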
@@ -43,8 +43,8 @@ static MV32 unscaled_mv(const MV *mv, const struct scale_factors *scale) {
 static void set_offsets_with_scaling(struct scale_factors *scale,
                                      int row, int col) {
-  scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale) & SUBPEL_MASK;
-  scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale) & SUBPEL_MASK;
+  scale->x_offset_q4 = scaled_x(col << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
+  scale->y_offset_q4 = scaled_y(row << SUBPEL_BITS, scale->sfc) & SUBPEL_MASK;
 }

 static void set_offsets_without_scaling(struct scale_factors *scale,
@@ -70,31 +70,30 @@ static int check_scale_factors(int other_w, int other_h,
 }

 void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+                                       struct scale_factors_common *scale_comm,
                                        int other_w, int other_h,
                                        int this_w, int this_h) {
   if (!check_scale_factors(other_w, other_h, this_w, this_h)) {
-    scale->x_scale_fp = REF_INVALID_SCALE;
-    scale->y_scale_fp = REF_INVALID_SCALE;
+    scale_comm->x_scale_fp = REF_INVALID_SCALE;
+    scale_comm->y_scale_fp = REF_INVALID_SCALE;
     return;
   }

-  scale->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
-  scale->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
-  scale->x_step_q4 = scaled_x(16, scale);
-  scale->y_step_q4 = scaled_y(16, scale);
-  scale->x_offset_q4 = 0;  // calculated per block
-  scale->y_offset_q4 = 0;  // calculated per block
+  scale_comm->x_scale_fp = get_fixed_point_scale_factor(other_w, this_w);
+  scale_comm->y_scale_fp = get_fixed_point_scale_factor(other_h, this_h);
+  scale_comm->x_step_q4 = scaled_x(16, scale_comm);
+  scale_comm->y_step_q4 = scaled_y(16, scale_comm);

-  if (vp9_is_scaled(scale)) {
-    scale->scale_value_x = scaled_x;
-    scale->scale_value_y = scaled_y;
-    scale->set_scaled_offsets = set_offsets_with_scaling;
-    scale->scale_mv = scaled_mv;
+  if (vp9_is_scaled(scale_comm)) {
+    scale_comm->scale_value_x = scaled_x;
+    scale_comm->scale_value_y = scaled_y;
+    scale_comm->set_scaled_offsets = set_offsets_with_scaling;
+    scale_comm->scale_mv = scaled_mv;
   } else {
-    scale->scale_value_x = unscaled_value;
-    scale->scale_value_y = unscaled_value;
-    scale->set_scaled_offsets = set_offsets_without_scaling;
-    scale->scale_mv = unscaled_mv;
+    scale_comm->scale_value_x = unscaled_value;
+    scale_comm->scale_value_y = unscaled_value;
+    scale_comm->set_scaled_offsets = set_offsets_without_scaling;
+    scale_comm->scale_mv = unscaled_mv;
   }

   // TODO(agrange): Investigate the best choice of functions to use here
@@ -103,44 +102,48 @@ void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
   // applied in one direction only, and not at all for 0,0, seems to give the
   // best quality, but it may be worth trying an additional mode that does
   // do the filtering on full-pel.
-  if (scale->x_step_q4 == 16) {
-    if (scale->y_step_q4 == 16) {
+  if (scale_comm->x_step_q4 == 16) {
+    if (scale_comm->y_step_q4 == 16) {
       // No scaling in either direction.
-      scale->predict[0][0][0] = vp9_convolve_copy;
-      scale->predict[0][0][1] = vp9_convolve_avg;
-      scale->predict[0][1][0] = vp9_convolve8_vert;
-      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale->predict[1][0][0] = vp9_convolve8_horiz;
-      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+      scale_comm->predict[0][0][0] = vp9_convolve_copy;
+      scale_comm->predict[0][0][1] = vp9_convolve_avg;
+      scale_comm->predict[0][1][0] = vp9_convolve8_vert;
+      scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
+      scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
+      scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
     } else {
       // No scaling in x direction. Must always scale in the y direction.
-      scale->predict[0][0][0] = vp9_convolve8_vert;
-      scale->predict[0][0][1] = vp9_convolve8_avg_vert;
-      scale->predict[0][1][0] = vp9_convolve8_vert;
-      scale->predict[0][1][1] = vp9_convolve8_avg_vert;
-      scale->predict[1][0][0] = vp9_convolve8;
-      scale->predict[1][0][1] = vp9_convolve8_avg;
+      scale_comm->predict[0][0][0] = vp9_convolve8_vert;
+      scale_comm->predict[0][0][1] = vp9_convolve8_avg_vert;
+      scale_comm->predict[0][1][0] = vp9_convolve8_vert;
+      scale_comm->predict[0][1][1] = vp9_convolve8_avg_vert;
+      scale_comm->predict[1][0][0] = vp9_convolve8;
+      scale_comm->predict[1][0][1] = vp9_convolve8_avg;
     }
   } else {
-    if (scale->y_step_q4 == 16) {
+    if (scale_comm->y_step_q4 == 16) {
       // No scaling in the y direction. Must always scale in the x direction.
-      scale->predict[0][0][0] = vp9_convolve8_horiz;
-      scale->predict[0][0][1] = vp9_convolve8_avg_horiz;
-      scale->predict[0][1][0] = vp9_convolve8;
-      scale->predict[0][1][1] = vp9_convolve8_avg;
-      scale->predict[1][0][0] = vp9_convolve8_horiz;
-      scale->predict[1][0][1] = vp9_convolve8_avg_horiz;
+      scale_comm->predict[0][0][0] = vp9_convolve8_horiz;
+      scale_comm->predict[0][0][1] = vp9_convolve8_avg_horiz;
+      scale_comm->predict[0][1][0] = vp9_convolve8;
+      scale_comm->predict[0][1][1] = vp9_convolve8_avg;
+      scale_comm->predict[1][0][0] = vp9_convolve8_horiz;
+      scale_comm->predict[1][0][1] = vp9_convolve8_avg_horiz;
     } else {
       // Must always scale in both directions.
-      scale->predict[0][0][0] = vp9_convolve8;
-      scale->predict[0][0][1] = vp9_convolve8_avg;
-      scale->predict[0][1][0] = vp9_convolve8;
-      scale->predict[0][1][1] = vp9_convolve8_avg;
-      scale->predict[1][0][0] = vp9_convolve8;
-      scale->predict[1][0][1] = vp9_convolve8_avg;
+      scale_comm->predict[0][0][0] = vp9_convolve8;
+      scale_comm->predict[0][0][1] = vp9_convolve8_avg;
+      scale_comm->predict[0][1][0] = vp9_convolve8;
+      scale_comm->predict[0][1][1] = vp9_convolve8_avg;
+      scale_comm->predict[1][0][0] = vp9_convolve8;
+      scale_comm->predict[1][0][1] = vp9_convolve8_avg;
     }
   }
   // 2D subpel motion always gets filtered in both directions
-  scale->predict[1][1][0] = vp9_convolve8;
-  scale->predict[1][1][1] = vp9_convolve8_avg;
+  scale_comm->predict[1][1][0] = vp9_convolve8;
+  scale_comm->predict[1][1][1] = vp9_convolve8_avg;
+
+  scale->sfc = scale_comm;
+  scale->x_offset_q4 = 0;  // calculated per block
+  scale->y_offset_q4 = 0;  // calculated per block
 }
vp9/common/vp9_scale.h

@@ -18,34 +18,40 @@
 #define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
 #define REF_INVALID_SCALE -1

-struct scale_factors {
+struct scale_factors;
+
+struct scale_factors_common {
   int x_scale_fp;   // horizontal fixed point scale factor
   int y_scale_fp;   // vertical fixed point scale factor
-  int x_offset_q4;
   int x_step_q4;
-  int y_offset_q4;
   int y_step_q4;

-  int (*scale_value_x)(int val, const struct scale_factors *scale);
-  int (*scale_value_y)(int val, const struct scale_factors *scale);
+  int (*scale_value_x)(int val, const struct scale_factors_common *sfc);
+  int (*scale_value_y)(int val, const struct scale_factors_common *sfc);
   void (*set_scaled_offsets)(struct scale_factors *scale, int row, int col);
   MV32 (*scale_mv)(const MV *mv, const struct scale_factors *scale);

   convolve_fn_t predict[2][2][2];  // horiz, vert, avg
 };

+struct scale_factors {
+  int x_offset_q4;
+  int y_offset_q4;
+  const struct scale_factors_common *sfc;
+};
+
 void vp9_setup_scale_factors_for_frame(struct scale_factors *scale,
+                                       struct scale_factors_common *scale_comm,
                                        int other_w, int other_h,
                                        int this_w, int this_h);

-static int vp9_is_valid_scale(const struct scale_factors *sf) {
-  return sf->x_scale_fp != REF_INVALID_SCALE &&
-         sf->y_scale_fp != REF_INVALID_SCALE;
+static int vp9_is_valid_scale(const struct scale_factors_common *sfc) {
+  return sfc->x_scale_fp != REF_INVALID_SCALE &&
+         sfc->y_scale_fp != REF_INVALID_SCALE;
 }

-static int vp9_is_scaled(const struct scale_factors *sf) {
-  return sf->x_scale_fp != REF_NO_SCALE ||
-         sf->y_scale_fp != REF_NO_SCALE;
+static int vp9_is_scaled(const struct scale_factors_common *sfc) {
+  return sfc->x_scale_fp != REF_NO_SCALE ||
+         sfc->y_scale_fp != REF_NO_SCALE;
 }

 #endif  // VP9_COMMON_VP9_SCALE_H_
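The header makes the new ownership model explicit: scale_factors_common carries everything constant for a given reference frame, while scale_factors is now just two per-block offsets plus a const pointer to the shared part, so copying or re-pointing a per-reference view is trivial (as the decoder's set_ref below now does with xd->scale_factor[idx].sfc = sfc). A runnable sketch with simplified stand-in types, not the real libvpx definitions:

    #include <stdio.h>

    /* Hypothetical driver code illustrating the sharing pattern. */
    struct sfc_sketch { int x_scale_fp, y_scale_fp; };
    struct sf_sketch  { int x_offset_q4, y_offset_q4;
                        const struct sfc_sketch *sfc; };

    int main(void) {
      /* One shared common struct per reference frame; 1 << 14 stands in
       * for a REF_NO_SCALE-style value, assuming REF_SCALE_SHIFT == 14. */
      struct sfc_sketch common = { 1 << 14, 1 << 14 };
      struct sf_sketch a = { 0, 0, &common };
      struct sf_sketch b = a;   /* copying a view is two ints + a pointer */
      b.x_offset_q4 = 7;        /* per-block state stays per-view */
      printf("shared factor: %d, a.off=%d, b.off=%d\n",
             a.sfc->x_scale_fp, a.x_offset_q4, b.x_offset_q4);
      return 0;
    }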
vp9/decoder/vp9_decodframe.c

@@ -295,13 +295,13 @@ static void set_ref(VP9_COMMON *const cm, MACROBLOCKD *const xd,
   MB_MODE_INFO *const mbmi = &xd->mi_8x8[0]->mbmi;
   const int ref = mbmi->ref_frame[idx] - LAST_FRAME;
   const YV12_BUFFER_CONFIG *cfg = &cm->yv12_fb[cm->active_ref_idx[ref]];
-  const struct scale_factors *sf = &cm->active_ref_scale[ref];
-  if (!vp9_is_valid_scale(sf))
+  const struct scale_factors_common *sfc = &cm->active_ref_scale_comm[ref];
+  if (!vp9_is_valid_scale(sfc))
     vpx_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                        "Invalid scale factors");

-  xd->scale_factor[idx] = *sf;
-  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, sf);
+  xd->scale_factor[idx].sfc = sfc;
+  setup_pre_planes(xd, idx, cfg, mi_row, mi_col, &xd->scale_factor[idx]);
   xd->corrupted |= cfg->corrupted;
 }
vp9/encoder/vp9_rdopt.c

@@ -2269,10 +2269,10 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
   scale[frame_type] = cpi->common.active_ref_scale[frame_type - 1];
   scale[frame_type].x_offset_q4 =
-      ROUND_POWER_OF_TWO(mi_col * MI_SIZE * scale[frame_type].x_scale_fp,
+      ROUND_POWER_OF_TWO(mi_col * MI_SIZE * scale[frame_type].sfc->x_scale_fp,
                          REF_SCALE_SHIFT) & 0xf;
   scale[frame_type].y_offset_q4 =
-      ROUND_POWER_OF_TWO(mi_row * MI_SIZE * scale[frame_type].y_scale_fp,
+      ROUND_POWER_OF_TWO(mi_row * MI_SIZE * scale[frame_type].sfc->y_scale_fp,
                          REF_SCALE_SHIFT) & 0xf;

   // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
@@ -2295,7 +2295,7 @@ static void setup_buffer_inter(VP9_COMP *cpi, MACROBLOCK *x,
   // Further refinement that is encode side only to test the top few candidates
   // in full and choose the best as the centre point for subsequent searches.
   // The current implementation doesn't support scaling.
-  if (!vp9_is_scaled(&scale[frame_type]) && block_size >= BLOCK_8X8)
+  if (!vp9_is_scaled(scale[frame_type].sfc) && block_size >= BLOCK_8X8)
     mv_pred(cpi, x, yv12_mb[frame_type][0].buf, yv12->y_stride,
             frame_type, block_size);
 }
@@ -2502,9 +2502,9 @@ static void joint_motion_search(VP9_COMP *cpi, MACROBLOCK *x,
     setup_pre_planes(xd, 1, scaled_ref_frame[1], mi_row, mi_col, NULL);
   }

-  xd->scale_factor[0].set_scaled_offsets(&xd->scale_factor[0],
-                                         mi_row, mi_col);
-  xd->scale_factor[1].set_scaled_offsets(&xd->scale_factor[1],
-                                         mi_row, mi_col);
+  xd->scale_factor[0].sfc->set_scaled_offsets(&xd->scale_factor[0],
+                                              mi_row, mi_col);
+  xd->scale_factor[1].sfc->set_scaled_offsets(&xd->scale_factor[1],
+                                              mi_row, mi_col);

   scaled_first_yv12 = xd->plane[0].pre[0];
@@ -3963,11 +3963,11 @@ int64_t vp9_rd_pick_inter_mode_sub8x8(VP9_COMP *cpi, MACROBLOCK *x,
     // TODO(jingning, jkoleszar): scaling reference frame not supported for
     // sub8x8 blocks.
     if (ref_frame > 0 &&
-        vp9_is_scaled(&scale_factor[ref_frame]))
+        vp9_is_scaled(scale_factor[ref_frame].sfc))
       continue;

     if (second_ref_frame > 0 &&
-        vp9_is_scaled(&scale_factor[second_ref_frame]))
+        vp9_is_scaled(scale_factor[second_ref_frame].sfc))
       continue;

     set_scale_factors(xd, ref_frame, second_ref_frame, scale_factor);
vp9/encoder/vp9_temporal_filter.c

@@ -38,14 +38,15 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
                                             int stride,
                                             int mv_row,
                                             int mv_col,
-                                            uint8_t *pred) {
+                                            uint8_t *pred,
+                                            struct scale_factors *scale) {
   const int which_mv = 0;
   MV mv = { mv_row, mv_col };

   vp9_build_inter_predictor(y_mb_ptr, stride,
                             &pred[0], 16,
                             &mv,
-                            &xd->scale_factor[which_mv],
+                            scale,
                             16, 16,
                             which_mv,
                             &xd->subpix, MV_PRECISION_Q3);
@@ -55,7 +56,7 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
   vp9_build_inter_predictor(u_mb_ptr, stride,
                             &pred[256], 8,
                             &mv,
-                            &xd->scale_factor[which_mv],
+                            scale,
                             8, 8,
                             which_mv,
                             &xd->subpix, MV_PRECISION_Q4);
@@ -63,7 +64,7 @@ static void temporal_filter_predictors_mb_c(MACROBLOCKD *xd,
   vp9_build_inter_predictor(v_mb_ptr, stride,
                             &pred[320], 8,
                             &mv,
-                            &xd->scale_factor[which_mv],
+                            scale,
                             8, 8,
                             which_mv,
                             &xd->subpix, MV_PRECISION_Q4);
@@ -186,7 +187,8 @@ static int temporal_filter_find_matching_mb_c(VP9_COMP *cpi,
 static void temporal_filter_iterate_c(VP9_COMP *cpi,
                                       int frame_count,
                                       int alt_ref_index,
-                                      int strength) {
+                                      int strength,
+                                      struct scale_factors *scale) {
   int byte;
   int frame;
   int mb_col, mb_row;
@@ -280,7 +282,7 @@ static void temporal_filter_iterate_c(VP9_COMP *cpi,
             cpi->frames[frame]->y_stride,
             mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.row,
             mbd->mi_8x8[0]->bmi[0].as_mv[0].as_mv.col,
-            predictor);
+            predictor, scale);

         // Apply the filter (YUV)
         vp9_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
@@ -374,6 +376,9 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
   const int num_frames_forward = vp9_lookahead_depth(cpi->lookahead)
                                - (num_frames_backward + 1);
+  struct scale_factors scale;
+  struct scale_factors_common scale_comm;
+
   switch (blur_type) {
     case 1:
       // Backward Blur
@@ -432,7 +437,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
 #endif

   // Setup scaling factors. Scaling on each of the arnr frames is not supported
-  vp9_setup_scale_factors_for_frame(&cpi->mb.e_mbd.scale_factor[0],
+  vp9_setup_scale_factors_for_frame(&scale, &scale_comm,
                                     cm->yv12_fb[cm->new_fb_idx].y_crop_width,
                                     cm->yv12_fb[cm->new_fb_idx].y_crop_height,
                                     cm->width, cm->height);
@@ -447,7 +452,7 @@ void vp9_temporal_filter_prepare(VP9_COMP *cpi, int distance) {
   }

   temporal_filter_iterate_c(cpi, frames_to_blur, frames_to_blur_backward,
-                            strength);
+                            strength, &scale);
 }

 void configure_arnr_filter(VP9_COMP *cpi, const unsigned int this_frame,