Commit 73aa77c0 authored by Steinar Midtskogen

Increase parallelism in CLPF SIMD

Change-Id: I66cdb67f8a1c2072516a65822dcc838e516ba9d7
parent 099220bc
@@ -852,9 +852,10 @@ add_proto qw/void aom_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint
 specialize qw/aom_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
 if (aom_config("CONFIG_CDEF") eq "yes") {
-  add_proto qw/void aom_clpf_block_hbd/, "const uint16_t *src, uint16_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, unsigned int bd";
-  add_proto qw/void aom_clpf_hblock_hbd/, "const uint16_t *src, uint16_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, unsigned int bd";
-  add_proto qw/void aom_clpf_block/, "const uint8_t *src, uint8_t *dst, int sstride, int dstride, int x0, int y0, int sizex, int sizey, unsigned int strength, unsigned int bd";
+  add_proto qw/void aom_clpf_block_hbd/, "uint16_t *dst, const uint16_t *src, int dstride, int sstride, int sizex, int sizey, unsigned int strength, unsigned int bd";
+  add_proto qw/void aom_clpf_hblock_hbd/, "uint16_t *dst, const uint16_t *src, int dstride, int sstride, int sizex, int sizey, unsigned int strength, unsigned int bd";
+  add_proto qw/void aom_clpf_block/, "uint8_t *dst, const uint16_t *src, int dstride, int sstride, int sizex, int sizey, unsigned int strength, unsigned int bd";
+  add_proto qw/void aom_clpf_hblock/, "uint8_t *dst, const uint16_t *src, int dstride, int sstride, int sizex, int sizey, unsigned int strength, unsigned int bd";
   # VS compiling for 32 bit targets does not support vector types in
   # structs as arguments, which makes the v256 type of the intrinsics
   # hard to support, so optimizations for this target are disabled.
@@ -862,6 +863,7 @@ if (aom_config("CONFIG_CDEF") eq "yes") {
   specialize qw/aom_clpf_block_hbd sse2 ssse3 sse4_1 neon/;
   specialize qw/aom_clpf_hblock_hbd sse2 ssse3 sse4_1 neon/;
   specialize qw/aom_clpf_block sse2 ssse3 sse4_1 neon/;
+  specialize qw/aom_clpf_hblock sse2 ssse3 sse4_1 neon/;
 }
 }
...
@@ -38,13 +38,13 @@ int av1_clpf_hsample(int X, int A, int B, int C, int D, int s,
   return (4 + delta - (delta < 0)) >> 3;
 }
-void aom_clpf_block_c(const uint8_t *src, uint8_t *dst, int sstride,
-                      int dstride, int x0, int y0, int sizex, int sizey,
-                      unsigned int strength, unsigned int damping) {
+void aom_clpf_block_c(uint8_t *dst, const uint16_t *src, int dstride,
+                      int sstride, int sizex, int sizey, unsigned int strength,
+                      unsigned int damping) {
   int x, y;
-  for (y = y0; y < y0 + sizey; y++) {
-    for (x = x0; x < x0 + sizex; x++) {
+  for (y = 0; y < sizey; y++) {
+    for (x = 0; x < sizex; x++) {
       const int X = src[y * sstride + x];
       const int A = src[(y - 2) * sstride + x];
       const int B = src[(y - 1) * sstride + x];
@@ -61,15 +61,14 @@ void aom_clpf_block_c(const uint8_t *src, uint8_t *dst, int sstride,
   }
 }
-// Identical to aom_clpf_block_c() apart from "src" and "dst".
-// TODO(stemidts): Put under CONFIG_AOM_HIGHBITDEPTH if CDEF do 8 bit internally
-void aom_clpf_block_hbd_c(const uint16_t *src, uint16_t *dst, int sstride,
-                          int dstride, int x0, int y0, int sizex, int sizey,
+// Identical to aom_clpf_block_c() apart from "dst".
+void aom_clpf_block_hbd_c(uint16_t *dst, const uint16_t *src, int dstride,
+                          int sstride, int sizex, int sizey,
                           unsigned int strength, unsigned int damping) {
   int x, y;
-  for (y = y0; y < y0 + sizey; y++) {
-    for (x = x0; x < x0 + sizex; x++) {
+  for (y = 0; y < sizey; y++) {
+    for (x = 0; x < sizex; x++) {
       const int X = src[y * sstride + x];
       const int A = src[(y - 2) * sstride + x];
       const int B = src[(y - 1) * sstride + x];
@@ -86,14 +85,32 @@ void aom_clpf_block_hbd_c(const uint16_t *src, uint16_t *dst, int sstride,
   }
 }
-// TODO(stemidts): Put under CONFIG_AOM_HIGHBITDEPTH if CDEF do 8 bit internally
-void aom_clpf_hblock_hbd_c(const uint16_t *src, uint16_t *dst, int sstride,
-                           int dstride, int x0, int y0, int sizex, int sizey,
+// Vertically restricted filter
+void aom_clpf_hblock_c(uint8_t *dst, const uint16_t *src, int dstride,
+                       int sstride, int sizex, int sizey, unsigned int strength,
+                       unsigned int damping) {
+  int x, y;
+  for (y = 0; y < sizey; y++) {
+    for (x = 0; x < sizex; x++) {
+      const int X = src[y * sstride + x];
+      const int A = src[y * sstride + x - 2];
+      const int B = src[y * sstride + x - 1];
+      const int C = src[y * sstride + x + 1];
+      const int D = src[y * sstride + x + 2];
+      const int delta = av1_clpf_hsample(X, A, B, C, D, strength, damping);
+      dst[y * dstride + x] = X + delta;
+    }
+  }
+}
+void aom_clpf_hblock_hbd_c(uint16_t *dst, const uint16_t *src, int dstride,
+                           int sstride, int sizex, int sizey,
                            unsigned int strength, unsigned int damping) {
   int x, y;
-  for (y = y0; y < y0 + sizey; y++) {
-    for (x = x0; x < x0 + sizex; x++) {
+  for (y = 0; y < sizey; y++) {
+    for (x = 0; x < sizex; x++) {
       const int X = src[y * sstride + x];
       const int A = src[y * sstride + x - 2];
       const int B = src[y * sstride + x - 1];
...
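
For reference, here is a standalone scalar sketch of the horizontally restricted filter that the new aom_clpf_hblock_c() applies per pixel, written from the formulas visible in this patch: the constrain() comment further down in clpf_simd.h, the 1/8, 3/8, 3/8, 1/8 weights in the calc_hdelta() comment, and the rounding in av1_clpf_hsample()'s return statement shown above. The helper names clpf_log2, clpf_constrain and clpf_hsample_ref are illustrative, not part of the patch, and the sketch assumes strength > 0 and damping >= log2(strength).

#include <stdio.h>
#include <stdlib.h>

/* Illustrative log2 for a power-of-two strength (e.g. 1, 2 or 4). */
static int clpf_log2(unsigned int x) {
  int n = 0;
  while (x >>= 1) n++;
  return n;
}

/* sign(diff) * max(0, |diff| - max(0, |diff| - strength +
 *                                  (|diff| >> (damping - log2(strength))))) */
static int clpf_constrain(int diff, int strength, unsigned int damping) {
  const int a = abs(diff);
  int inner = a - strength + (a >> (damping - clpf_log2(strength)));
  int v;
  if (inner < 0) inner = 0;
  v = a - inner;
  if (v < 0) v = 0;
  return diff < 0 ? -v : v;
}

/* One output pixel: X is the centre sample, A/B its two left neighbours and
 * C/D its two right neighbours; delta is rounded to nearest on division by 8. */
static int clpf_hsample_ref(int X, int A, int B, int C, int D, int strength,
                            unsigned int damping) {
  const int delta = 1 * clpf_constrain(A - X, strength, damping) +
                    3 * clpf_constrain(B - X, strength, damping) +
                    3 * clpf_constrain(C - X, strength, damping) +
                    1 * clpf_constrain(D - X, strength, damping);
  return (4 + delta - (delta < 0)) >> 3;
}

int main(void) {
  /* Made-up sample values: prints 81 (the centre pixel 80 is pulled up by 1). */
  printf("%d\n", 80 + clpf_hsample_ref(80, 90, 88, 78, 76, 4, 6));
  return 0;
}
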
@@ -15,32 +15,29 @@
 #include "av1/common/clpf_simd_kernel.h"
 // Process blocks of width 8, two lines at a time, 8 bit.
-static void clpf_block8(const uint8_t *src, uint8_t *dst, int sstride,
-                        int dstride, int x0, int y0, int sizey,
-                        unsigned int strength, unsigned int dmp) {
+static void clpf_block8(uint8_t *dst, const uint16_t *src, int dstride,
+                        int sstride, int sizey, unsigned int strength,
+                        unsigned int dmp) {
   int y;
-  dst += x0 + y0 * dstride;
-  src += x0 + y0 * sstride;
   for (y = 0; y < sizey; y += 2) {
-    const v64 l1 = v64_load_aligned(src);
-    const v64 l2 = v64_load_aligned(src + sstride);
-    const v64 l3 = v64_load_aligned(src - sstride);
-    const v64 l4 = v64_load_aligned(src + 2 * sstride);
-    const v128 a = v128_from_v64(v64_load_aligned(src - 2 * sstride), l3);
-    const v128 b = v128_from_v64(l3, l1);
-    const v128 g = v128_from_v64(l2, l4);
-    const v128 h = v128_from_v64(l4, v64_load_aligned(src + 3 * sstride));
-    const v128 c = v128_from_v64(v64_load_unaligned(src - 2),
-                                 v64_load_unaligned(src - 2 + sstride));
-    const v128 d = v128_from_v64(v64_load_unaligned(src - 1),
-                                 v64_load_unaligned(src - 1 + sstride));
-    const v128 e = v128_from_v64(v64_load_unaligned(src + 1),
-                                 v64_load_unaligned(src + 1 + sstride));
-    const v128 f = v128_from_v64(v64_load_unaligned(src + 2),
-                                 v64_load_unaligned(src + 2 + sstride));
-    const v128 o = calc_delta(v128_from_v64(l1, l2), a, b, c, d, e, f, g, h,
-                              strength, dmp);
+    const v128 l1 = v128_load_aligned(src);
+    const v128 l2 = v128_load_aligned(src + sstride);
+    const v128 l3 = v128_load_aligned(src - sstride);
+    const v128 l4 = v128_load_aligned(src + 2 * sstride);
+    const v128 a = v128_pack_s16_u8(v128_load_aligned(src - 2 * sstride), l3);
+    const v128 b = v128_pack_s16_u8(l3, l1);
+    const v128 g = v128_pack_s16_u8(l2, l4);
+    const v128 h = v128_pack_s16_u8(l4, v128_load_aligned(src + 3 * sstride));
+    const v128 c = v128_pack_s16_u8(v128_load_unaligned(src - 2),
+                                    v128_load_unaligned(src - 2 + sstride));
+    const v128 d = v128_pack_s16_u8(v128_load_unaligned(src - 1),
+                                    v128_load_unaligned(src - 1 + sstride));
+    const v128 e = v128_pack_s16_u8(v128_load_unaligned(src + 1),
+                                    v128_load_unaligned(src + 1 + sstride));
+    const v128 f = v128_pack_s16_u8(v128_load_unaligned(src + 2),
+                                    v128_load_unaligned(src + 2 + sstride));
+    const v128 o = calc_delta(v128_pack_s16_u8(l1, l2), a, b, c, d, e, f, g, h,
+                              strength, dmp);
     v64_store_aligned(dst, v128_high_v64(o));
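
A note on the v128_pack_s16_u8() calls introduced above (my reading of the diff, not text from the patch): the 8-bit path now reads 16-bit source samples, so one row of eight pixels fills a whole 128-bit register, and packing two such rows narrows them to bytes with unsigned saturation while keeping both rows in a single v128 for calc_delta(). In scalar terms, and ignoring the exact lane order, which is the SIMD library's concern, the hypothetical helper below shows the narrowing step only:

#include <stdint.h>

/* Hypothetical helper: narrow two rows of eight 16-bit samples to 8 bits with
 * unsigned saturation and place them in one 16-byte block, one row per half. */
static void pack_two_rows_u8(uint8_t out[16], const int16_t row0[8],
                             const int16_t row1[8]) {
  int i;
  for (i = 0; i < 8; i++) {
    out[i] = row0[i] < 0 ? 0 : row0[i] > 255 ? 255 : (uint8_t)row0[i];
    out[8 + i] = row1[i] < 0 ? 0 : row1[i] > 255 ? 255 : (uint8_t)row1[i];
  }
}
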
@@ -51,46 +48,124 @@ static void clpf_block8(const uint8_t *src, uint8_t *dst, int sstride,
 }
 // Process blocks of width 4, four lines at a time, 8 bit.
-static void clpf_block4(const uint8_t *src, uint8_t *dst, int sstride,
-                        int dstride, int x0, int y0, int sizey,
-                        unsigned int strength, unsigned int dmp) {
+static void clpf_block4(uint8_t *dst, const uint16_t *src, int dstride,
+                        int sstride, int sizey, unsigned int strength,
+                        unsigned int dmp) {
   int y;
-  dst += x0 + y0 * dstride;
-  src += x0 + y0 * sstride;
-  for (y = 0; y < sizey; y += 4) {
-    const uint32_t l0 = u32_load_aligned(src - 2 * sstride);
-    const uint32_t l1 = u32_load_aligned(src - sstride);
-    const uint32_t l2 = u32_load_aligned(src);
-    const uint32_t l3 = u32_load_aligned(src + sstride);
-    const uint32_t l4 = u32_load_aligned(src + 2 * sstride);
-    const uint32_t l5 = u32_load_aligned(src + 3 * sstride);
-    const uint32_t l6 = u32_load_aligned(src + 4 * sstride);
-    const uint32_t l7 = u32_load_aligned(src + 5 * sstride);
-    const v128 a = v128_from_32(l0, l1, l2, l3);
-    const v128 b = v128_from_32(l1, l2, l3, l4);
-    const v128 g = v128_from_32(l3, l4, l5, l6);
-    const v128 h = v128_from_32(l4, l5, l6, l7);
-    const v128 c = v128_from_32(u32_load_unaligned(src - 2),
-                                u32_load_unaligned(src + sstride - 2),
-                                u32_load_unaligned(src + 2 * sstride - 2),
-                                u32_load_unaligned(src + 3 * sstride - 2));
-    const v128 d = v128_from_32(u32_load_unaligned(src - 1),
-                                u32_load_unaligned(src + sstride - 1),
-                                u32_load_unaligned(src + 2 * sstride - 1),
-                                u32_load_unaligned(src + 3 * sstride - 1));
-    const v128 e = v128_from_32(u32_load_unaligned(src + 1),
-                                u32_load_unaligned(src + sstride + 1),
-                                u32_load_unaligned(src + 2 * sstride + 1),
-                                u32_load_unaligned(src + 3 * sstride + 1));
-    const v128 f = v128_from_32(u32_load_unaligned(src + 2),
-                                u32_load_unaligned(src + sstride + 2),
-                                u32_load_unaligned(src + 2 * sstride + 2),
-                                u32_load_unaligned(src + 3 * sstride + 2));
-    const v128 o = calc_delta(v128_from_32(l2, l3, l4, l5), a, b, c, d, e, f, g,
-                              h, strength, dmp);
+  for (y = 0; y < sizey; y += 4) {
+    const v64 l0 = v64_load_aligned(src - 2 * sstride);
+    const v64 l1 = v64_load_aligned(src - sstride);
+    const v64 l2 = v64_load_aligned(src);
+    const v64 l3 = v64_load_aligned(src + sstride);
+    const v64 l4 = v64_load_aligned(src + 2 * sstride);
+    const v64 l5 = v64_load_aligned(src + 3 * sstride);
+    const v64 l6 = v64_load_aligned(src + 4 * sstride);
+    const v64 l7 = v64_load_aligned(src + 5 * sstride);
+    const v128 a =
+        v128_pack_s16_u8(v128_from_v64(l0, l1), v128_from_v64(l2, l3));
+    const v128 b =
+        v128_pack_s16_u8(v128_from_v64(l1, l2), v128_from_v64(l3, l4));
+    const v128 g =
+        v128_pack_s16_u8(v128_from_v64(l3, l4), v128_from_v64(l5, l6));
+    const v128 h =
+        v128_pack_s16_u8(v128_from_v64(l4, l5), v128_from_v64(l6, l7));
+    const v128 c = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src - 2),
+                      v64_load_unaligned(src + sstride - 2)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 2),
+                      v64_load_unaligned(src + 3 * sstride - 2)));
+    const v128 d = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src - 1),
+                      v64_load_unaligned(src + sstride - 1)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 1),
+                      v64_load_unaligned(src + 3 * sstride - 1)));
+    const v128 e = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src + 1),
+                      v64_load_unaligned(src + sstride + 1)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 1),
+                      v64_load_unaligned(src + 3 * sstride + 1)));
+    const v128 f = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src + 2),
+                      v64_load_unaligned(src + sstride + 2)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 2),
+                      v64_load_unaligned(src + 3 * sstride + 2)));
+    const v128 o = calc_delta(
+        v128_pack_s16_u8(v128_from_v64(l2, l3), v128_from_v64(l4, l5)), a, b, c,
+        d, e, f, g, h, strength, dmp);
+    u32_store_aligned(dst, v128_low_u32(v128_shr_n_byte(o, 12)));
+    u32_store_aligned(dst + dstride, v128_low_u32(v128_shr_n_byte(o, 8)));
+    u32_store_aligned(dst + 2 * dstride, v128_low_u32(v128_shr_n_byte(o, 4)));
+    u32_store_aligned(dst + 3 * dstride, v128_low_u32(o));
+    dst += 4 * dstride;
+    src += 4 * sstride;
+  }
+}
+static void clpf_hblock8(uint8_t *dst, const uint16_t *src, int dstride,
+                         int sstride, int sizey, unsigned int strength,
+                         unsigned int dmp) {
+  int y;
+  for (y = 0; y < sizey; y += 2) {
+    const v128 l1 = v128_load_aligned(src);
+    const v128 l2 = v128_load_aligned(src + sstride);
+    const v128 a = v128_pack_s16_u8(v128_load_unaligned(src - 2),
+                                    v128_load_unaligned(src - 2 + sstride));
+    const v128 b = v128_pack_s16_u8(v128_load_unaligned(src - 1),
+                                    v128_load_unaligned(src - 1 + sstride));
+    const v128 c = v128_pack_s16_u8(v128_load_unaligned(src + 1),
+                                    v128_load_unaligned(src + 1 + sstride));
+    const v128 d = v128_pack_s16_u8(v128_load_unaligned(src + 2),
+                                    v128_load_unaligned(src + 2 + sstride));
+    const v128 o =
+        calc_hdelta(v128_pack_s16_u8(l1, l2), a, b, c, d, strength, dmp);
+    v64_store_aligned(dst, v128_high_v64(o));
+    v64_store_aligned(dst + dstride, v128_low_v64(o));
+    src += sstride * 2;
+    dst += dstride * 2;
+  }
+}
+// Process blocks of width 4, four lines at a time, 8 bit.
+static void clpf_hblock4(uint8_t *dst, const uint16_t *src, int dstride,
+                         int sstride, int sizey, unsigned int strength,
+                         unsigned int dmp) {
+  int y;
+  for (y = 0; y < sizey; y += 4) {
+    const v64 l0 = v64_load_aligned(src);
+    const v64 l1 = v64_load_aligned(src + sstride);
+    const v64 l2 = v64_load_aligned(src + 2 * sstride);
+    const v64 l3 = v64_load_aligned(src + 3 * sstride);
+    const v128 a = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src - 2),
+                      v64_load_unaligned(src + sstride - 2)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 2),
+                      v64_load_unaligned(src + 3 * sstride - 2)));
+    const v128 b = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src - 1),
+                      v64_load_unaligned(src + sstride - 1)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride - 1),
+                      v64_load_unaligned(src + 3 * sstride - 1)));
+    const v128 c = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src + 1),
+                      v64_load_unaligned(src + sstride + 1)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 1),
+                      v64_load_unaligned(src + 3 * sstride + 1)));
+    const v128 d = v128_pack_s16_u8(
+        v128_from_v64(v64_load_unaligned(src + 2),
+                      v64_load_unaligned(src + sstride + 2)),
+        v128_from_v64(v64_load_unaligned(src + 2 * sstride + 2),
+                      v64_load_unaligned(src + 3 * sstride + 2)));
+    const v128 o = calc_hdelta(
+        v128_pack_s16_u8(v128_from_v64(l0, l1), v128_from_v64(l2, l3)), a, b, c,
+        d, strength, dmp);
     u32_store_aligned(dst, v128_low_u32(v128_shr_n_byte(o, 12)));
     u32_store_aligned(dst + dstride, v128_low_u32(v128_shr_n_byte(o, 8)));
@@ -102,23 +177,34 @@ static void clpf_block4(const uint8_t *src, uint8_t *dst, int sstride,
   }
 }
-void SIMD_FUNC(aom_clpf_block)(const uint8_t *src, uint8_t *dst, int sstride,
-                               int dstride, int x0, int y0, int sizex,
-                               int sizey, unsigned int strength,
-                               unsigned int dmp) {
+void SIMD_FUNC(aom_clpf_block)(uint8_t *dst, const uint16_t *src, int dstride,
+                               int sstride, int sizex, int sizey,
+                               unsigned int strength, unsigned int dmp) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 3) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block widths not 4 or 8
     // * block heights not a multiple of 4 if the block width is 4
-    aom_clpf_block_c(src, dst, sstride, dstride, x0, y0, sizex, sizey, strength,
-                     dmp);
+    aom_clpf_block_c(dst, src, dstride, sstride, sizex, sizey, strength, dmp);
   } else {
-    (sizex == 4 ? clpf_block4 : clpf_block8)(src, dst, sstride, dstride, x0, y0,
-                                             sizey, strength, dmp);
+    (sizex == 4 ? clpf_block4 : clpf_block8)(dst, src, dstride, sstride, sizey,
+                                             strength, dmp);
+  }
+}
+void SIMD_FUNC(aom_clpf_hblock)(uint8_t *dst, const uint16_t *src, int dstride,
+                                int sstride, int sizex, int sizey,
+                                unsigned int strength, unsigned int dmp) {
+  if ((sizex != 4 && sizex != 8) || ((sizey & 3) && sizex == 4)) {
+    // Fallback to C for odd sizes:
+    // * block widths not 4 or 8
+    // * block heights not a multiple of 4 if the block width is 4
+    aom_clpf_hblock_c(dst, src, dstride, sstride, sizex, sizey, strength, dmp);
+  } else {
+    (sizex == 4 ? clpf_hblock4 : clpf_hblock8)(dst, src, dstride, sstride,
+                                               sizey, strength, dmp);
   }
 }
+#if defined(CONFIG_AOM_HIGHBITDEPTH)
 // sign(a - b) * max(0, abs(a - b) - max(0, abs(a - b) -
 //                                        strength + (abs(a - b) >> (dmp - log2(s)))))
 SIMD_INLINE v128 constrain_hbd(v128 a, v128 b, unsigned int strength,
@@ -206,15 +292,11 @@ static void calc_hdelta_hbd8(v128 o, v128 a, v128 b, v128 c, v128 d,
 }
 // Process blocks of width 4, two lines at time.
-SIMD_INLINE void clpf_block_hbd4(const uint16_t *src, uint16_t *dst,
-                                 int sstride, int dstride, int x0, int y0,
-                                 int sizey, unsigned int strength,
-                                 unsigned int dmp) {
+SIMD_INLINE void clpf_block_hbd4(uint16_t *dst, const uint16_t *src,
+                                 int dstride, int sstride, int sizey,
+                                 unsigned int strength, unsigned int dmp) {
   int y;
-  dst += x0 + y0 * dstride;
-  src += x0 + y0 * sstride;
   for (y = 0; y < sizey; y += 2) {
     const v64 l1 = v64_load_aligned(src);
     const v64 l2 = v64_load_aligned(src + sstride);
@@ -241,14 +323,11 @@ SIMD_INLINE void clpf_block_hbd4(const uint16_t *src, uint16_t *dst,
 }
 // The most simple case. Start here if you need to understand the functions.
-SIMD_INLINE void clpf_block_hbd(const uint16_t *src, uint16_t *dst, int sstride,
-                                int dstride, int x0, int y0, int sizey,
-                                unsigned int strength, unsigned int dmp) {
+SIMD_INLINE void clpf_block_hbd(uint16_t *dst, const uint16_t *src, int dstride,
+                                int sstride, int sizey, unsigned int strength,
+                                unsigned int dmp) {
   int y;
-  dst += x0 + y0 * dstride;
-  src += x0 + y0 * sstride;
   for (y = 0; y < sizey; y++) {
     const v128 o = v128_load_aligned(src);
     const v128 a = v128_load_aligned(src - 2 * sstride);
@@ -267,15 +346,11 @@ SIMD_INLINE void clpf_block_hbd(const uint16_t *src, uint16_t *dst, int sstride,
 }
 // Process blocks of width 4, horizontal filter, two lines at time.
-SIMD_INLINE void clpf_hblock_hbd4(const uint16_t *src, uint16_t *dst,
-                                  int sstride, int dstride, int x0, int y0,
-                                  int sizey, unsigned int strength,
-                                  unsigned int dmp) {
+SIMD_INLINE void clpf_hblock_hbd4(uint16_t *dst, const uint16_t *src,
+                                  int dstride, int sstride, int sizey,
+                                  unsigned int strength, unsigned int dmp) {
   int y;
-  dst += x0 + y0 * dstride;
-  src += x0 + y0 * sstride;
   for (y = 0; y < sizey; y += 2) {
     const v128 a = v128_from_v64(v64_load_unaligned(src - 2),
                                  v64_load_unaligned(src - 2 + sstride));
@@ -295,15 +370,11 @@ SIMD_INLINE void clpf_hblock_hbd4(const uint16_t *src, uint16_t *dst,
 }
 // Process blocks of width 8, horizontal filter, two lines at time.
-SIMD_INLINE void clpf_hblock_hbd(const uint16_t *src, uint16_t *dst,
-                                 int sstride, int dstride, int x0, int y0,
-                                 int sizey, unsigned int strength,
-                                 unsigned int dmp) {
+SIMD_INLINE void clpf_hblock_hbd(uint16_t *dst, const uint16_t *src,
+                                 int dstride, int sstride, int sizey,
+                                 unsigned int strength, unsigned int dmp) {
   int y;
-  dst += x0 + y0 * dstride;
-  src += x0 + y0 * sstride;
   for (y = 0; y < sizey; y++) {
     const v128 o = v128_load_aligned(src);
     const v128 a = v128_load_unaligned(src - 2);
@@ -317,35 +388,34 @@ SIMD_INLINE void clpf_hblock_hbd(const uint16_t *src, uint16_t *dst,
   }
 }
-void SIMD_FUNC(aom_clpf_block_hbd)(const uint16_t *src, uint16_t *dst,
-                                   int sstride, int dstride, int x0, int y0,
-                                   int sizex, int sizey, unsigned int strength,
+void SIMD_FUNC(aom_clpf_block_hbd)(uint16_t *dst, const uint16_t *src,
+                                   int dstride, int sstride, int sizex,
+                                   int sizey, unsigned int strength,
                                    unsigned int dmp) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 1) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block width not 4 or 8
     // * block heights not a multiple of 2 if the block width is 4
-    aom_clpf_block_hbd_c(src, dst, sstride, dstride, x0, y0, sizex, sizey,
-                         strength, dmp);
+    aom_clpf_block_hbd_c(dst, src, dstride, sstride, sizex, sizey, strength,
+                         dmp);
   } else {
-    (sizex == 4 ? clpf_block_hbd4 : clpf_block_hbd)(
-        src, dst, sstride, dstride, x0, y0, sizey, strength, dmp);
+    (sizex == 4 ? clpf_block_hbd4 : clpf_block_hbd)(dst, src, dstride, sstride,
+                                                    sizey, strength, dmp);
   }
 }
-void SIMD_FUNC(aom_clpf_hblock_hbd)(const uint16_t *src, uint16_t *dst,
-                                    int sstride, int dstride, int x0, int y0,
-                                    int sizex, int sizey, unsigned int strength,
+void SIMD_FUNC(aom_clpf_hblock_hbd)(uint16_t *dst, const uint16_t *src,
+                                    int dstride, int sstride, int sizex,
+                                    int sizey, unsigned int strength,
                                     unsigned int dmp) {
   if ((sizex != 4 && sizex != 8) || ((sizey & 1) && sizex == 4)) {
     // Fallback to C for odd sizes:
     // * block width not 4 or 8
     // * block heights not a multiple of 2 if the block width is 4
-    aom_clpf_hblock_hbd_c(src, dst, sstride, dstride, x0, y0, sizex, sizey,
-                          strength, dmp);
+    aom_clpf_hblock_hbd_c(dst, src, dstride, sstride, sizex, sizey, strength,
+                          dmp);
   } else {
     (sizex == 4 ? clpf_hblock_hbd4 : clpf_hblock_hbd)(
-        src, dst, sstride, dstride, x0, y0, sizey, strength, dmp);
+        dst, src, dstride, sstride, sizey, strength, dmp);
   }
 }
+#endif
@@ -47,4 +47,19 @@ SIMD_INLINE v128 calc_delta(v128 x, v128 a, v128 b, v128 c, v128 d, v128 e,
                          4));
 }
+// delta = 1/8 * constrain(a, x, s) + 3/8 * constrain(b, x, s) +
+//         3/8 * constrain(c, x, s) + 1/8 * constrain(d, x, s)
+SIMD_INLINE v128 calc_hdelta(v128 x, v128 a, v128 b, v128 c, v128 d,
+                             unsigned int s, unsigned int dmp) {
+  const v128 bc = v128_add_8(constrain(b, x, s, dmp), constrain(c, x, s, dmp));
+  const v128 delta =
+      v128_add_8(v128_add_8(constrain(a, x, s, dmp), constrain(d, x, s, dmp)),
+                 v128_add_8(v128_add_8(bc, bc), bc));
+  return v128_add_8(
+      x, v128_shr_s8(
+             v128_add_8(v128_dup_8(4),
+                        v128_add_8(delta, v128_cmplt_s8(delta, v128_zero()))),
+             3));
+}
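
To make the packed arithmetic in calc_hdelta() easier to check, here is a per-lane scalar equivalent of its tail, written as a sketch with illustrative names (hdelta_lane is not part of the patch): a, b, c and d stand for the already-constrained differences, the bc doubling reproduces the 3x weight using only additions, and the final shift rounds delta/8 to nearest, matching the return statement of av1_clpf_hsample() in the clpf.c hunk above.

#include <stdio.h>

static int hdelta_lane(int a, int b, int c, int d, int x) {
  const int bc = b + c;
  const int delta = (a + d) + (bc + bc + bc);   /* 1*a + 3*b + 3*c + 1*d */
  return x + ((4 + delta - (delta < 0)) >> 3);  /* x + round(delta / 8) */
}

int main(void) {
  /* Prints "1 -1": +6/8 and -6/8 both round to the nearest integer
   * (relies on arithmetic right shift of negative values, as the C filter does). */
  printf("%d %d\n", hdelta_lane(4, 4, -2, -4, 0), hdelta_lane(-4, -4, 2, 4, 0));
  return 0;
}
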