Xiph.Org / aom-rav1e / Commits / 92b0e544

Commit 92b0e544
authored Jun 14, 2011 by Johann

fix --disable-runtime-cpu-detect on x86

Change-Id: Ib8e429152c9a8b6032be22b5faac802aa8224caa
parent bf6b314d
Changes: 3 files
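What the commit does, read from the diffs below: several file-local MMX/SSE2 wrapper functions lose their static qualifier and gain a vp8_ prefix, dct_x86.h gains static mappings for the fast-DCT entries, and the decoder init code is guarded by #if CONFIG_RUNTIME_CPU_DETECT. The reason is the one spelled out in the note preserved in the decoder diff: with --disable-runtime-cpu-detect, calls do not go through the runtime-filled function tables but resolve at compile time through the static mappings in <arch>/filename_<arch>.h, and those mappings can only name externally visible vp8_-prefixed symbols. The sketch below is a simplified illustration of the two dispatch modes; the FDCT_TABLE type and FDCT_INVOKE macro are stand-ins, not copied from the real vp8 headers.

/* Simplified sketch of the two dispatch modes. Illustrative only: the
 * FDCT_TABLE type and FDCT_INVOKE macro below are stand-ins, not the
 * real vp8 RTCD headers. */
#include <stdio.h>

/* The kind of wrapper this commit renames: it must be a non-static,
 * vp8_-prefixed symbol so a static mapping can refer to it. */
void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
{
    (void)input; (void)output; (void)pitch;
    puts("vp8_short_fdct8x4_mmx called");
}

#if CONFIG_RUNTIME_CPU_DETECT
/* Runtime mode: calls go through a table that vp8_arch_x86_encoder_init()
 * fills in according to x86_simd_caps(). */
typedef struct { void (*fast8x4)(short *, short *, int); } FDCT_TABLE;
#define FDCT_INVOKE(ctx, fn) ((ctx)->fn)
#else
/* Static mode (--disable-runtime-cpu-detect): the table is never consulted;
 * the call resolves at compile time through mappings like the ones this
 * commit adds to dct_x86.h, so the target must exist under its vp8_ name. */
#define vp8_fdct_fast8x4 vp8_short_fdct8x4_mmx
#define FDCT_INVOKE(ctx, fn) vp8_fdct_##fn
typedef int FDCT_TABLE; /* placeholder so this demo builds either way */
#endif

int main(void)
{
    short in[32] = {0}, out[32] = {0};
    FDCT_TABLE table = {0};
#if CONFIG_RUNTIME_CPU_DETECT
    table.fast8x4 = vp8_short_fdct8x4_mmx; /* normally done by encoder init */
#endif
    FDCT_INVOKE(&table, fast8x4)(in, out, 8);
    return 0;
}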
vp8/decoder/x86/x86_dsystemdependent.c
@@ -17,7 +17,7 @@
 #if HAVE_MMX
 void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q);
 
-static void dequantize_b_mmx(BLOCKD *d)
+void vp8_dequantize_b_mmx(BLOCKD *d)
 {
     short *sq = (short *) d->qcoeff;
     short *dq = (short *) d->dqcoeff;
@@ -28,6 +28,7 @@ static void dequantize_b_mmx(BLOCKD *d)
 void vp8_arch_x86_decode_init(VP8D_COMP *pbi)
 {
+#if CONFIG_RUNTIME_CPU_DETECT
     int flags = x86_simd_caps();
 
     /* Note:
@@ -36,12 +37,11 @@ void vp8_arch_x86_decode_init(VP8D_COMP *pbi)
      * you modify any of the function mappings present in this file, be sure
      * to also update them in static mapings (<arch>/filename_<arch>.h)
      */
-#if CONFIG_RUNTIME_CPU_DETECT
     /* Override default functions with fastest ones for this CPU. */
 #if HAVE_MMX
     if (flags & HAS_MMX)
     {
-        pbi->dequant.block               = dequantize_b_mmx;
+        pbi->dequant.block               = vp8_dequantize_b_mmx;
         pbi->dequant.idct_add            = vp8_dequant_idct_add_mmx;
         pbi->dequant.dc_idct_add         = vp8_dequant_dc_idct_add_mmx;
         pbi->dequant.dc_idct_add_y_block = vp8_dequant_dc_idct_add_y_block_mmx;
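One reading of the decoder change: hoisting the #if CONFIG_RUNTIME_CPU_DETECT guard above `int flags = x86_simd_caps();` keeps both the capability query and the override block out of builds configured with --disable-runtime-cpu-detect, rather than leaving an unused `flags` variable and a call into the runtime-detection helpers behind.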
vp8/encoder/x86/dct_x86.h
@@ -31,6 +31,12 @@ extern prototype_fdct(vp8_short_fdct8x4_mmx);
 #undef vp8_fdct_short8x4
 #define vp8_fdct_short8x4 vp8_short_fdct8x4_mmx
 
+#undef vp8_fdct_fast4x4
+#define vp8_fdct_fast4x4 vp8_short_fdct4x4_mmx
+
+#undef vp8_fdct_fast8x4
+#define vp8_fdct_fast8x4 vp8_short_fdct8x4_mmx
+
 #endif
 #endif
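The two new mappings mirror the existing short4x4/short8x4 pattern: when the static tables are in effect, vp8_fdct_fast4x4 and vp8_fdct_fast8x4 now resolve to the same MMX transforms that the runtime path selects in x86_csystemdependent.c below. Without them, a --disable-runtime-cpu-detect build would presumably have been left with the generic C fast-DCT mappings, which appears to be part of what this commit fixes.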
vp8/encoder/x86/x86_csystemdependent.c
@@ -16,7 +16,7 @@
 #if HAVE_MMX
-static void short_fdct8x4_mmx(short *input, short *output, int pitch)
+void vp8_short_fdct8x4_mmx(short *input, short *output, int pitch)
 {
     vp8_short_fdct4x4_mmx(input,     output,      pitch);
     vp8_short_fdct4x4_mmx(input + 4, output + 16, pitch);
@@ -26,7 +26,7 @@ int vp8_fast_quantize_b_impl_mmx(short *coeff_ptr, short *zbin_ptr,
                                  short *qcoeff_ptr, short *dequant_ptr,
                                  short *scan_mask, short *round_ptr,
                                  short *quant_ptr, short *dqcoeff_ptr);
-static void fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
+void vp8_fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
 {
     short *scan_mask  = vp8_default_zig_zag_mask;//d->scan_order_mask_ptr;
     short *coeff_ptr  = b->coeff;
@@ -51,7 +51,7 @@ static void fast_quantize_b_mmx(BLOCK *b, BLOCKD *d)
 }
 
 int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-static int mbblock_error_mmx(MACROBLOCK *mb, int dc)
+int vp8_mbblock_error_mmx(MACROBLOCK *mb, int dc)
 {
     short *coeff_ptr = mb->block[0].coeff;
     short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
@@ -59,7 +59,7 @@ static int mbblock_error_mmx(MACROBLOCK *mb, int dc)
 }
 
 int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
-static int mbuverror_mmx(MACROBLOCK *mb)
+int vp8_mbuverror_mmx(MACROBLOCK *mb)
 {
     short *s_ptr = &mb->coeff[256];
     short *d_ptr = &mb->e_mbd.dqcoeff[256];
@@ -69,7 +69,7 @@ static int mbuverror_mmx(MACROBLOCK *mb)
 void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
                              short *diff, unsigned char *predictor,
                              int pitch);
-static void subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
+void vp8_subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
 {
     unsigned char *z = *(be->base_src) + be->src;
     unsigned int  src_stride = be->src_stride;
@@ -82,7 +82,7 @@ static void subtract_b_mmx(BLOCK *be, BLOCKD *bd, int pitch)
 #if HAVE_SSE2
 int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
-static int mbblock_error_xmm(MACROBLOCK *mb, int dc)
+int vp8_mbblock_error_xmm(MACROBLOCK *mb, int dc)
 {
     short *coeff_ptr = mb->block[0].coeff;
     short *dcoef_ptr = mb->e_mbd.block[0].dqcoeff;
@@ -90,7 +90,7 @@ static int mbblock_error_xmm(MACROBLOCK *mb, int dc)
 }
 
 int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
-static int mbuverror_xmm(MACROBLOCK *mb)
+int vp8_mbuverror_xmm(MACROBLOCK *mb)
 {
     short *s_ptr = &mb->coeff[256];
     short *d_ptr = &mb->e_mbd.dqcoeff[256];
@@ -100,7 +100,7 @@ static int mbuverror_xmm(MACROBLOCK *mb)
 void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
                               short *diff, unsigned char *predictor,
                               int pitch);
-static void subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
+void vp8_subtract_b_sse2(BLOCK *be, BLOCKD *bd, int pitch)
 {
     unsigned char *z = *(be->base_src) + be->src;
     unsigned int  src_stride = be->src_stride;
@@ -178,20 +178,20 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.variance.get4x4sse_cs          = vp8_get4x4sse_cs_mmx;
 
         cpi->rtcd.fdct.short4x4                  = vp8_short_fdct4x4_mmx;
-        cpi->rtcd.fdct.short8x4                  = short_fdct8x4_mmx;
+        cpi->rtcd.fdct.short8x4                  = vp8_short_fdct8x4_mmx;
         cpi->rtcd.fdct.fast4x4                   = vp8_short_fdct4x4_mmx;
-        cpi->rtcd.fdct.fast8x4                   = short_fdct8x4_mmx;
+        cpi->rtcd.fdct.fast8x4                   = vp8_short_fdct8x4_mmx;
 
         cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_c;
 
         cpi->rtcd.encodemb.berr                  = vp8_block_error_mmx;
-        cpi->rtcd.encodemb.mberr                 = mbblock_error_mmx;
-        cpi->rtcd.encodemb.mbuverr               = mbuverror_mmx;
-        cpi->rtcd.encodemb.subb                  = subtract_b_mmx;
+        cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_mmx;
+        cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_mmx;
+        cpi->rtcd.encodemb.subb                  = vp8_subtract_b_mmx;
         cpi->rtcd.encodemb.submby                = vp8_subtract_mby_mmx;
         cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_mmx;
 
-        /*cpi->rtcd.quantize.fastquantb            = fast_quantize_b_mmx;*/
+        /*cpi->rtcd.quantize.fastquantb            = vp8_fast_quantize_b_mmx;*/
     }
 #endif
@@ -233,9 +233,9 @@ void vp8_arch_x86_encoder_init(VP8_COMP *cpi)
         cpi->rtcd.fdct.walsh_short4x4            = vp8_short_walsh4x4_sse2;
 
         cpi->rtcd.encodemb.berr                  = vp8_block_error_xmm;
-        cpi->rtcd.encodemb.mberr                 = mbblock_error_xmm;
-        cpi->rtcd.encodemb.mbuverr               = mbuverror_xmm;
-        cpi->rtcd.encodemb.subb                  = subtract_b_sse2;
+        cpi->rtcd.encodemb.mberr                 = vp8_mbblock_error_xmm;
+        cpi->rtcd.encodemb.mbuverr               = vp8_mbuverror_xmm;
+        cpi->rtcd.encodemb.subb                  = vp8_subtract_b_sse2;
         cpi->rtcd.encodemb.submby                = vp8_subtract_mby_sse2;
         cpi->rtcd.encodemb.submbuv               = vp8_subtract_mbuv_sse2;