Xiph.Org / aom-rav1e / Commits / 9b8444a1

Commit 9b8444a1, authored Mar 31, 2017 by Steinar Midtskogen; committed by Jean-Marc Valin, Apr 02, 2017
Add v64_ssub_u16, v128_ssub_u16 and v256_ssub_u16
Change-Id: I60543913cbd8dc5cad524ab74697227f9e93836e
parent 131a0d55
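The new intrinsics perform unsigned saturating subtraction on 16-bit lanes: a difference that would go below zero clamps to 0 instead of wrapping. A minimal scalar sketch of the per-lane semantics (the function name here is illustrative, not part of the patch):

#include <stdint.h>

/* Per-lane behaviour of the new ssub_u16 intrinsics:
   unsigned saturating subtract. */
static uint16_t ssub_u16_lane(uint16_t a, uint16_t b) {
  return a > b ? (uint16_t)(a - b) : 0;
}

For example, ssub_u16_lane(3, 5) yields 0, where plain uint16_t subtraction would wrap to 65534.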
Showing 14 changed files with 93 additions and 45 deletions:
aom_dsp/simd/v128_intrinsics.h       +1  -0
aom_dsp/simd/v128_intrinsics_arm.h   +5  -0
aom_dsp/simd/v128_intrinsics_c.h     +5  -0
aom_dsp/simd/v128_intrinsics_x86.h   +2  -0
aom_dsp/simd/v256_intrinsics.h       +1  -0
aom_dsp/simd/v256_intrinsics_c.h     +5  -0
aom_dsp/simd/v256_intrinsics_v128.h  +4  -0
aom_dsp/simd/v256_intrinsics_x86.h   +4  -0
aom_dsp/simd/v64_intrinsics.h        +1  -0
aom_dsp/simd/v64_intrinsics_arm.h    +5  -0
aom_dsp/simd/v64_intrinsics_c.h      +9  -0
aom_dsp/simd/v64_intrinsics_x86.h    +2  -0
test/simd_cmp_impl.h                 +2  -0
test/simd_impl.h                     +47 -45
aom_dsp/simd/v128_intrinsics.h

@@ -94,6 +94,7 @@ SIMD_INLINE v128 v128_ssub_u8(v128 a, v128 b) { return c_v128_ssub_u8(a, b); }
 SIMD_INLINE v128 v128_ssub_s8(v128 a, v128 b) { return c_v128_ssub_s8(a, b); }
 SIMD_INLINE v128 v128_sub_16(v128 a, v128 b) { return c_v128_sub_16(a, b); }
 SIMD_INLINE v128 v128_ssub_s16(v128 a, v128 b) { return c_v128_ssub_s16(a, b); }
+SIMD_INLINE v128 v128_ssub_u16(v128 a, v128 b) { return c_v128_ssub_u16(a, b); }
 SIMD_INLINE v128 v128_sub_32(v128 a, v128 b) { return c_v128_sub_32(a, b); }
 SIMD_INLINE v128 v128_abs_s16(v128 a) { return c_v128_abs_s16(a); }
aom_dsp/simd/v128_intrinsics_arm.h

@@ -184,6 +184,11 @@ SIMD_INLINE v128 v128_ssub_s16(v128 x, v128 y) {
       vqsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
 }
 
+SIMD_INLINE v128 v128_ssub_u16(v128 x, v128 y) {
+  return vreinterpretq_s64_u16(
+      vqsubq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
+}
+
 SIMD_INLINE v128 v128_ssub_u8(v128 x, v128 y) {
   return vreinterpretq_s64_u8(
       vqsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
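On NEON the generic v128 value is carried in 64-bit lanes, so each operation reinterprets to the lane width it needs, applies the native saturating intrinsic (vqsubq_u16 already clamps each u16 lane at 0), and reinterprets back. A standalone sketch of the same pattern; the local typedef is an assumption for illustration and may differ from the header's own definition:

#include <arm_neon.h>

typedef int64x2_t my_v128; /* illustrative stand-in for the header's v128 */

static my_v128 my_ssub_u16(my_v128 x, my_v128 y) {
  /* Reinterpret to u16 lanes, saturating subtract, reinterpret back. */
  return vreinterpretq_s64_u16(
      vqsubq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}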
aom_dsp/simd/v128_intrinsics_c.h

@@ -230,6 +230,11 @@ SIMD_INLINE c_v128 c_v128_ssub_s16(c_v128 a, c_v128 b) {
                          c_v64_ssub_s16(a.v64[0], b.v64[0]));
 }
 
+SIMD_INLINE c_v128 c_v128_ssub_u16(c_v128 a, c_v128 b) {
+  return c_v128_from_v64(c_v64_ssub_u16(a.v64[1], b.v64[1]),
+                         c_v64_ssub_u16(a.v64[0], b.v64[0]));
+}
+
 SIMD_INLINE c_v128 c_v128_sub_32(c_v128 a, c_v128 b) {
   return c_v128_from_v64(c_v64_sub_32(a.v64[1], b.v64[1]),
                          c_v64_sub_32(a.v64[0], b.v64[0]));
aom_dsp/simd/v128_intrinsics_x86.h

@@ -108,6 +108,8 @@ SIMD_INLINE v128 v128_sub_16(v128 a, v128 b) { return _mm_sub_epi16(a, b); }
 SIMD_INLINE v128 v128_ssub_s16(v128 a, v128 b) { return _mm_subs_epi16(a, b); }
 
+SIMD_INLINE v128 v128_ssub_u16(v128 a, v128 b) { return _mm_subs_epu16(a, b); }
+
 SIMD_INLINE v128 v128_sub_32(v128 a, v128 b) { return _mm_sub_epi32(a, b); }
 
 SIMD_INLINE v128 v128_abs_s16(v128 a) {
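On x86 the port is a one-liner because SSE2 already provides unsigned saturating subtraction for 16-bit lanes: _mm_subs_epu16, the PSUBUSW instruction. A small self-contained check of the clamping behaviour (test values are illustrative):

#include <emmintrin.h> /* SSE2 */
#include <stdint.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi16(3);
  __m128i b = _mm_set1_epi16(5);
  uint16_t out[8];
  /* 3 - 5 clamps to 0 in every lane; wrapping subtraction would give 65534. */
  _mm_storeu_si128((__m128i *)out, _mm_subs_epu16(a, b));
  printf("%u\n", out[0]); /* prints 0 */
  return 0;
}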
aom_dsp/simd/v256_intrinsics.h

@@ -96,6 +96,7 @@ SIMD_INLINE v256 v256_ssub_u8(v256 a, v256 b) { return c_v256_ssub_u8(a, b); }
 SIMD_INLINE v256 v256_ssub_s8(v256 a, v256 b) { return c_v256_ssub_s8(a, b); }
 SIMD_INLINE v256 v256_sub_16(v256 a, v256 b) { return c_v256_sub_16(a, b); }
 SIMD_INLINE v256 v256_ssub_s16(v256 a, v256 b) { return c_v256_ssub_s16(a, b); }
+SIMD_INLINE v256 v256_ssub_u16(v256 a, v256 b) { return c_v256_ssub_u16(a, b); }
 SIMD_INLINE v256 v256_sub_32(v256 a, v256 b) { return c_v256_sub_32(a, b); }
 SIMD_INLINE v256 v256_abs_s16(v256 a) { return c_v256_abs_s16(a); }
aom_dsp/simd/v256_intrinsics_c.h

@@ -239,6 +239,11 @@ SIMD_INLINE c_v256 c_v256_ssub_s16(c_v256 a, c_v256 b) {
                           c_v128_ssub_s16(a.v128[0], b.v128[0]));
 }
 
+SIMD_INLINE c_v256 c_v256_ssub_u16(c_v256 a, c_v256 b) {
+  return c_v256_from_v128(c_v128_ssub_u16(a.v128[1], b.v128[1]),
+                          c_v128_ssub_u16(a.v128[0], b.v128[0]));
+}
+
 SIMD_INLINE c_v256 c_v256_sub_32(c_v256 a, c_v256 b) {
   return c_v256_from_v128(c_v128_sub_32(a.v128[1], b.v128[1]),
                           c_v128_sub_32(a.v128[0], b.v128[0]));
aom_dsp/simd/v256_intrinsics_v128.h

@@ -199,6 +199,10 @@ SIMD_INLINE v256 v256_ssub_s16(v256 a, v256 b) {
   return v256_from_v128(v128_ssub_s16(a.hi, b.hi), v128_ssub_s16(a.lo, b.lo));
 }
 
+SIMD_INLINE v256 v256_ssub_u16(v256 a, v256 b) {
+  return v256_from_v128(v128_ssub_u16(a.hi, b.hi), v128_ssub_u16(a.lo, b.lo));
+}
+
 SIMD_INLINE v256 v256_sub_32(v256 a, v256 b) {
   return v256_from_v128(v128_sub_32(a.hi, b.hi), v128_sub_32(a.lo, b.lo));
 }
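v256_intrinsics_v128.h builds 256-bit operations out of two 128-bit halves for targets without native 256-bit vectors, so the new v256_ssub_u16 is simply the v128 version applied to the hi and lo halves. A sketch of that composition pattern, under the assumption that a 256-bit value is a hi/lo pair of v128 (the struct and function names here are illustrative, not the header's real definitions):

/* Assumes the v128 intrinsics header is included. */
typedef struct { v128 lo, hi; } pair256; /* illustrative, not the real v256 */

static pair256 pair256_ssub_u16(pair256 a, pair256 b) {
  pair256 r;
  r.hi = v128_ssub_u16(a.hi, b.hi); /* upper 128 bits */
  r.lo = v128_ssub_u16(a.lo, b.lo); /* lower 128 bits */
  return r;
}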
aom_dsp/simd/v256_intrinsics_x86.h

@@ -110,6 +110,10 @@ SIMD_INLINE v256 v256_ssub_s16(v256 a, v256 b) {
   return _mm256_subs_epi16(a, b);
 }
 
+SIMD_INLINE v256 v256_ssub_u16(v256 a, v256 b) {
+  return _mm256_subs_epu16(a, b);
+}
+
 SIMD_INLINE v256 v256_sub_32(v256 a, v256 b) {
   return _mm256_sub_epi32(a, b);
 }
 
 SIMD_INLINE v256 v256_abs_s16(v256 a) {
   return _mm256_abs_epi16(a);
 }
aom_dsp/simd/v64_intrinsics.h

@@ -78,6 +78,7 @@ SIMD_INLINE v64 v64_ssub_u8(v64 a, v64 b) { return c_v64_ssub_u8(a, b); }
 SIMD_INLINE v64 v64_ssub_s8(v64 a, v64 b) { return c_v64_ssub_s8(a, b); }
 SIMD_INLINE v64 v64_sub_16(v64 a, v64 b) { return c_v64_sub_16(a, b); }
 SIMD_INLINE v64 v64_ssub_s16(v64 a, v64 b) { return c_v64_ssub_s16(a, b); }
+SIMD_INLINE v64 v64_ssub_u16(v64 a, v64 b) { return c_v64_ssub_u16(a, b); }
 SIMD_INLINE v64 v64_sub_32(v64 a, v64 b) { return c_v64_sub_32(a, b); }
 SIMD_INLINE v64 v64_abs_s16(v64 a) { return c_v64_abs_s16(a); }
aom_dsp/simd/v64_intrinsics_arm.h

@@ -218,6 +218,11 @@ SIMD_INLINE v64 v64_ssub_s16(v64 x, v64 y) {
       vqsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
 }
 
+SIMD_INLINE v64 v64_ssub_u16(v64 x, v64 y) {
+  return vreinterpret_s64_u16(
+      vqsub_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
+}
+
 SIMD_INLINE v64 v64_ssub_u8(v64 x, v64 y) {
   return vreinterpret_s64_u8(
       vqsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
aom_dsp/simd/v64_intrinsics_c.h

@@ -240,6 +240,15 @@ SIMD_INLINE c_v64 c_v64_ssub_s16(c_v64 a, c_v64 b) {
   return t;
 }
 
+SIMD_INLINE c_v64 c_v64_ssub_u16(c_v64 a, c_v64 b) {
+  c_v64 t;
+  int c;
+  for (c = 0; c < 4; c++)
+    t.u16[c] =
+        (int32_t)a.u16[c] - (int32_t)b.u16[c] < 0 ? 0 : a.u16[c] - b.u16[c];
+  return t;
+}
+
 SIMD_INLINE c_v64 c_v64_sub_32(c_v64 a, c_v64 b) {
   c_v64 t;
   t.u32[0] = (uint32_t)((int64_t)a.u32[0] - b.u32[0]);
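The reference implementation widens each u16 lane to int32_t before subtracting, so a negative intermediate is representable and can be clamped; per lane it computes max(a - b, 0). A minimal check of that lane computation, extracted from the loop above (the test values are illustrative):

#include <assert.h>
#include <stdint.h>

/* The per-lane computation from c_v64_ssub_u16. */
static uint16_t ref_lane(uint16_t a, uint16_t b) {
  return (int32_t)a - (int32_t)b < 0 ? 0 : (uint16_t)(a - b);
}

int main(void) {
  assert(ref_lane(5, 3) == 2);
  assert(ref_lane(3, 5) == 0);     /* would wrap to 65534 without the clamp */
  assert(ref_lane(0, 65535) == 0); /* extreme case also saturates */
  return 0;
}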
aom_dsp/simd/v64_intrinsics_x86.h

@@ -126,6 +126,8 @@ SIMD_INLINE v64 v64_sub_16(v64 a, v64 b) { return _mm_sub_epi16(a, b); }
 SIMD_INLINE v64 v64_ssub_s16(v64 a, v64 b) { return _mm_subs_epi16(a, b); }
 
+SIMD_INLINE v64 v64_ssub_u16(v64 a, v64 b) { return _mm_subs_epu16(a, b); }
+
 SIMD_INLINE v64 v64_sub_32(v64 a, v64 b) { return _mm_sub_epi32(a, b); }
 
 SIMD_INLINE v64 v64_abs_s16(v64 a) {
test/simd_cmp_impl.h

@@ -278,6 +278,7 @@ const mapping m[] = { MAP(v64_sad_u8),
                       MAP(v64_ssub_s8),
                       MAP(v64_sub_16),
                       MAP(v64_ssub_s16),
+                      MAP(v64_ssub_u16),
                       MAP(v64_sub_32),
                       MAP(v64_ziplo_8),
                       MAP(v64_ziphi_8),
@@ -449,6 +450,7 @@ const mapping m[] = { MAP(v64_sad_u8),
                       MAP(v128_ssub_s8),
                       MAP(v128_sub_16),
                       MAP(v128_ssub_s16),
+                      MAP(v128_ssub_u16),
                       MAP(v128_sub_32),
                       MAP(v128_ziplo_8),
                       MAP(v128_ziphi_8),
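Each MAP() entry registers an intrinsic with the comparison test, which checks the optimized implementation against its c_-prefixed reference. The harness's actual types are not shown in this diff; the shape of the per-operation check is roughly the following, where the function-pointer signatures and driver are assumptions for illustration only:

#include <stdint.h>
#include <string.h>

typedef uint64_t sv64; /* illustrative stand-in for v64 */

/* Compare one SIMD op against its scalar reference on one input pair. */
static int matches(sv64 (*simd_op)(sv64, sv64), sv64 (*ref_op)(sv64, sv64),
                   sv64 a, sv64 b) {
  sv64 s = simd_op(a, b), r = ref_op(a, b);
  return memcmp(&s, &r, sizeof(s)) == 0;
}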
test/simd_impl.h

@@ -236,32 +236,33 @@ INSTANTIATE(
     SIMD_TUPLE(v64_add_32, 0U, 0U), SIMD_TUPLE(v64_sub_8, 0U, 0U),
     SIMD_TUPLE(v64_ssub_u8, 0U, 0U), SIMD_TUPLE(v64_ssub_s8, 0U, 0U),
     SIMD_TUPLE(v64_sub_16, 0U, 0U), SIMD_TUPLE(v64_ssub_s16, 0U, 0U),
-    SIMD_TUPLE(v64_sub_32, 0U, 0U), SIMD_TUPLE(v64_ziplo_8, 0U, 0U),
-    SIMD_TUPLE(v64_ziphi_8, 0U, 0U), SIMD_TUPLE(v64_ziplo_16, 0U, 0U),
-    SIMD_TUPLE(v64_ziphi_16, 0U, 0U), SIMD_TUPLE(v64_ziplo_32, 0U, 0U),
-    SIMD_TUPLE(v64_ziphi_32, 0U, 0U), SIMD_TUPLE(v64_pack_s32_s16, 0U, 0U),
-    SIMD_TUPLE(v64_pack_s16_u8, 0U, 0U), SIMD_TUPLE(v64_pack_s16_s8, 0U, 0U),
-    SIMD_TUPLE(v64_unziphi_8, 0U, 0U), SIMD_TUPLE(v64_unziplo_8, 0U, 0U),
-    SIMD_TUPLE(v64_unziphi_16, 0U, 0U), SIMD_TUPLE(v64_unziplo_16, 0U, 0U),
-    SIMD_TUPLE(v64_or, 0U, 0U), SIMD_TUPLE(v64_xor, 0U, 0U),
-    SIMD_TUPLE(v64_and, 0U, 0U), SIMD_TUPLE(v64_andn, 0U, 0U),
-    SIMD_TUPLE(v64_mullo_s16, 0U, 0U), SIMD_TUPLE(v64_mulhi_s16, 0U, 0U),
-    SIMD_TUPLE(v64_mullo_s32, 0U, 0U), SIMD_TUPLE(v64_madd_s16, 0U, 0U),
-    SIMD_TUPLE(v64_madd_us8, 0U, 0U), SIMD_TUPLE(v64_avg_u8, 0U, 0U),
-    SIMD_TUPLE(v64_rdavg_u8, 0U, 0U), SIMD_TUPLE(v64_avg_u16, 0U, 0U),
-    SIMD_TUPLE(v64_min_u8, 0U, 0U), SIMD_TUPLE(v64_max_u8, 0U, 0U),
-    SIMD_TUPLE(v64_min_s8, 0U, 0U), SIMD_TUPLE(v64_max_s8, 0U, 0U),
-    SIMD_TUPLE(v64_min_s16, 0U, 0U), SIMD_TUPLE(v64_max_s16, 0U, 0U),
-    SIMD_TUPLE(v64_cmpgt_s8, 0U, 0U), SIMD_TUPLE(v64_cmplt_s8, 0U, 0U),
-    SIMD_TUPLE(v64_cmpeq_8, 0U, 0U), SIMD_TUPLE(v64_cmpgt_s16, 0U, 0U),
-    SIMD_TUPLE(v64_cmplt_s16, 0U, 0U), SIMD_TUPLE(v64_cmpeq_16, 0U, 0U),
-    SIMD_TUPLE(v64_shuffle_8, 7U, 8U));
+    SIMD_TUPLE(v64_ssub_u16, 0U, 0U), SIMD_TUPLE(v64_sub_32, 0U, 0U),
+    SIMD_TUPLE(v64_ziplo_8, 0U, 0U), SIMD_TUPLE(v64_ziphi_8, 0U, 0U),
+    SIMD_TUPLE(v64_ziplo_16, 0U, 0U), SIMD_TUPLE(v64_ziphi_16, 0U, 0U),
+    SIMD_TUPLE(v64_ziplo_32, 0U, 0U), SIMD_TUPLE(v64_ziphi_32, 0U, 0U),
+    SIMD_TUPLE(v64_pack_s32_s16, 0U, 0U), SIMD_TUPLE(v64_pack_s16_u8, 0U, 0U),
+    SIMD_TUPLE(v64_pack_s16_s8, 0U, 0U), SIMD_TUPLE(v64_unziphi_8, 0U, 0U),
+    SIMD_TUPLE(v64_unziplo_8, 0U, 0U), SIMD_TUPLE(v64_unziphi_16, 0U, 0U),
+    SIMD_TUPLE(v64_unziplo_16, 0U, 0U), SIMD_TUPLE(v64_or, 0U, 0U),
+    SIMD_TUPLE(v64_xor, 0U, 0U), SIMD_TUPLE(v64_and, 0U, 0U),
+    SIMD_TUPLE(v64_andn, 0U, 0U), SIMD_TUPLE(v64_mullo_s16, 0U, 0U),
+    SIMD_TUPLE(v64_mulhi_s16, 0U, 0U), SIMD_TUPLE(v64_mullo_s32, 0U, 0U),
+    SIMD_TUPLE(v64_madd_s16, 0U, 0U), SIMD_TUPLE(v64_madd_us8, 0U, 0U),
+    SIMD_TUPLE(v64_avg_u8, 0U, 0U), SIMD_TUPLE(v64_rdavg_u8, 0U, 0U),
+    SIMD_TUPLE(v64_avg_u16, 0U, 0U), SIMD_TUPLE(v64_min_u8, 0U, 0U),
+    SIMD_TUPLE(v64_max_u8, 0U, 0U), SIMD_TUPLE(v64_min_s8, 0U, 0U),
+    SIMD_TUPLE(v64_max_s8, 0U, 0U), SIMD_TUPLE(v64_min_s16, 0U, 0U),
+    SIMD_TUPLE(v64_max_s16, 0U, 0U), SIMD_TUPLE(v64_cmpgt_s8, 0U, 0U),
+    SIMD_TUPLE(v64_cmplt_s8, 0U, 0U), SIMD_TUPLE(v64_cmpeq_8, 0U, 0U),
+    SIMD_TUPLE(v64_cmpgt_s16, 0U, 0U), SIMD_TUPLE(v64_cmplt_s16, 0U, 0U),
+    SIMD_TUPLE(v64_cmpeq_16, 0U, 0U));
 
 INSTANTIATE(
-    ARCH, ARCH_POSTFIX(V64_V64V64_Part2), SIMD_TUPLE(imm_v64_align<1>, 0U, 0U),
-    SIMD_TUPLE(imm_v64_align<2>, 0U, 0U), SIMD_TUPLE(imm_v64_align<3>, 0U, 0U),
-    SIMD_TUPLE(imm_v64_align<4>, 0U, 0U), SIMD_TUPLE(imm_v64_align<5>, 0U, 0U),
-    SIMD_TUPLE(imm_v64_align<6>, 0U, 0U), SIMD_TUPLE(imm_v64_align<7>, 0U, 0U));
+    ARCH, ARCH_POSTFIX(V64_V64V64_Part2), SIMD_TUPLE(v64_shuffle_8, 7U, 8U),
+    SIMD_TUPLE(imm_v64_align<1>, 0U, 0U), SIMD_TUPLE(imm_v64_align<2>, 0U, 0U),
+    SIMD_TUPLE(imm_v64_align<3>, 0U, 0U), SIMD_TUPLE(imm_v64_align<4>, 0U, 0U),
+    SIMD_TUPLE(imm_v64_align<5>, 0U, 0U), SIMD_TUPLE(imm_v64_align<6>, 0U, 0U),
+    SIMD_TUPLE(imm_v64_align<7>, 0U, 0U));
 
 INSTANTIATE(ARCH, ARCH_POSTFIX(V64_V64), SIMD_TUPLE(v64_abs_s16, 0U, 0U),
             SIMD_TUPLE(v64_unpacklo_u8_s16, 0U, 0U),
@@ -394,29 +395,30 @@ INSTANTIATE(
     SIMD_TUPLE(v128_add_32, 0U, 0U), SIMD_TUPLE(v128_sub_8, 0U, 0U),
     SIMD_TUPLE(v128_ssub_u8, 0U, 0U), SIMD_TUPLE(v128_ssub_s8, 0U, 0U),
     SIMD_TUPLE(v128_sub_16, 0U, 0U), SIMD_TUPLE(v128_ssub_s16, 0U, 0U),
-    SIMD_TUPLE(v128_sub_32, 0U, 0U), SIMD_TUPLE(v128_ziplo_8, 0U, 0U),
-    SIMD_TUPLE(v128_ziphi_8, 0U, 0U), SIMD_TUPLE(v128_ziplo_16, 0U, 0U),
-    SIMD_TUPLE(v128_ziphi_16, 0U, 0U), SIMD_TUPLE(v128_ziplo_32, 0U, 0U),
-    SIMD_TUPLE(v128_ziphi_32, 0U, 0U), SIMD_TUPLE(v128_ziplo_64, 0U, 0U),
-    SIMD_TUPLE(v128_ziphi_64, 0U, 0U), SIMD_TUPLE(v128_unziphi_8, 0U, 0U),
-    SIMD_TUPLE(v128_unziplo_8, 0U, 0U), SIMD_TUPLE(v128_unziphi_16, 0U, 0U),
-    SIMD_TUPLE(v128_unziplo_16, 0U, 0U), SIMD_TUPLE(v128_unziphi_32, 0U, 0U),
-    SIMD_TUPLE(v128_unziplo_32, 0U, 0U), SIMD_TUPLE(v128_pack_s32_s16, 0U, 0U),
-    SIMD_TUPLE(v128_pack_s16_u8, 0U, 0U), SIMD_TUPLE(v128_pack_s16_s8, 0U, 0U),
-    SIMD_TUPLE(v128_or, 0U, 0U), SIMD_TUPLE(v128_xor, 0U, 0U),
-    SIMD_TUPLE(v128_and, 0U, 0U), SIMD_TUPLE(v128_andn, 0U, 0U),
-    SIMD_TUPLE(v128_mullo_s16, 0U, 0U), SIMD_TUPLE(v128_mulhi_s16, 0U, 0U),
-    SIMD_TUPLE(v128_mullo_s32, 0U, 0U), SIMD_TUPLE(v128_madd_s16, 0U, 0U),
-    SIMD_TUPLE(v128_madd_us8, 0U, 0U), SIMD_TUPLE(v128_avg_u8, 0U, 0U),
-    SIMD_TUPLE(v128_rdavg_u8, 0U, 0U), SIMD_TUPLE(v128_avg_u16, 0U, 0U),
-    SIMD_TUPLE(v128_min_u8, 0U, 0U), SIMD_TUPLE(v128_max_u8, 0U, 0U),
-    SIMD_TUPLE(v128_min_s8, 0U, 0U), SIMD_TUPLE(v128_max_s8, 0U, 0U),
-    SIMD_TUPLE(v128_min_s16, 0U, 0U), SIMD_TUPLE(v128_max_s16, 0U, 0U),
-    SIMD_TUPLE(v128_cmpgt_s8, 0U, 0U), SIMD_TUPLE(v128_cmplt_s8, 0U, 0U),
-    SIMD_TUPLE(v128_cmpeq_8, 0U, 0U), SIMD_TUPLE(v128_cmpgt_s16, 0U, 0U),
-    SIMD_TUPLE(v128_cmpeq_16, 0U, 0U));
+    SIMD_TUPLE(v128_ssub_u16, 0U, 0U), SIMD_TUPLE(v128_sub_32, 0U, 0U),
+    SIMD_TUPLE(v128_ziplo_8, 0U, 0U), SIMD_TUPLE(v128_ziphi_8, 0U, 0U),
+    SIMD_TUPLE(v128_ziplo_16, 0U, 0U), SIMD_TUPLE(v128_ziphi_16, 0U, 0U),
+    SIMD_TUPLE(v128_ziplo_32, 0U, 0U), SIMD_TUPLE(v128_ziphi_32, 0U, 0U),
+    SIMD_TUPLE(v128_ziplo_64, 0U, 0U), SIMD_TUPLE(v128_ziphi_64, 0U, 0U),
+    SIMD_TUPLE(v128_unziphi_8, 0U, 0U), SIMD_TUPLE(v128_unziplo_8, 0U, 0U),
+    SIMD_TUPLE(v128_unziphi_16, 0U, 0U), SIMD_TUPLE(v128_unziplo_16, 0U, 0U),
+    SIMD_TUPLE(v128_unziphi_32, 0U, 0U), SIMD_TUPLE(v128_unziplo_32, 0U, 0U),
+    SIMD_TUPLE(v128_pack_s32_s16, 0U, 0U), SIMD_TUPLE(v128_pack_s16_u8, 0U, 0U),
+    SIMD_TUPLE(v128_pack_s16_s8, 0U, 0U), SIMD_TUPLE(v128_or, 0U, 0U),
+    SIMD_TUPLE(v128_xor, 0U, 0U), SIMD_TUPLE(v128_and, 0U, 0U),
+    SIMD_TUPLE(v128_andn, 0U, 0U), SIMD_TUPLE(v128_mullo_s16, 0U, 0U),
+    SIMD_TUPLE(v128_mulhi_s16, 0U, 0U), SIMD_TUPLE(v128_mullo_s32, 0U, 0U),
+    SIMD_TUPLE(v128_madd_s16, 0U, 0U), SIMD_TUPLE(v128_madd_us8, 0U, 0U),
+    SIMD_TUPLE(v128_avg_u8, 0U, 0U), SIMD_TUPLE(v128_rdavg_u8, 0U, 0U),
+    SIMD_TUPLE(v128_avg_u16, 0U, 0U), SIMD_TUPLE(v128_min_u8, 0U, 0U),
+    SIMD_TUPLE(v128_max_u8, 0U, 0U), SIMD_TUPLE(v128_min_s8, 0U, 0U),
+    SIMD_TUPLE(v128_max_s8, 0U, 0U), SIMD_TUPLE(v128_min_s16, 0U, 0U),
+    SIMD_TUPLE(v128_max_s16, 0U, 0U), SIMD_TUPLE(v128_cmpgt_s8, 0U, 0U),
+    SIMD_TUPLE(v128_cmplt_s8, 0U, 0U), SIMD_TUPLE(v128_cmpeq_8, 0U, 0U),
+    SIMD_TUPLE(v128_cmpgt_s16, 0U, 0U));
 
 INSTANTIATE(
     ARCH, ARCH_POSTFIX(V128_V128V128_Part2), SIMD_TUPLE(v128_cmpeq_16, 0U, 0U),
     SIMD_TUPLE(v128_cmplt_s16, 0U, 0U), SIMD_TUPLE(v128_shuffle_8, 15U, 8U),
     SIMD_TUPLE(imm_v128_align<1>, 0U, 0U),