Commit 548671dd authored by Dmitry Kovalev's avatar Dmitry Kovalev

Removing vp9_add_constant_residual_{8x8, 16x16, 32x32} functions.

These functions are no longer needed. The only one still in use was
vp9_add_constant_residual_32x32, and the new vp9_short_idct32x32_1_add
eliminates that single usage. An SSE2-optimized version of
vp9_short_idct32x32_1_add will be added in the next patch set; for now
only the C implementation exists. With this change, all idct functions
are implemented in a consistent manner.

Change-Id: I63df79a13cf62aa2c9360a7a26933c100f9ebda3
parent 0f8805e0
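For context, the DC-only 32x32 inverse transform collapses to a single constant: the row and column passes each multiply the lone DC coefficient by cos(pi/4) in Q14 precision and round, and a final shift by 6 applies the 32x32 block scaling. Below is a minimal self-contained sketch of this fused path; the helpers are restated from the libvpx headers so it compiles standalone, and the exact constant values are assumptions on my part rather than part of this diff.

#include <stdint.h>

/* Helpers restated from vp9_idct.h / vp9_common.h (assumed values). */
#define DCT_CONST_BITS 14
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
static const int cospi_16_64 = 11585;  /* round(cos(pi/4) * 2^14) */

static int16_t dct_const_round_shift(int input) {
  return (int16_t)ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
}

static uint8_t clip_pixel(int val) {
  return (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
}

/* Fused DC-only inverse 32x32 DCT + reconstruction, mirroring the new
 * vp9_short_idct32x32_1_add_c in the hunk below: row pass, column pass,
 * then the block-size scaling (>> 6), giving one constant a1 that is
 * added (with clipping) to every pixel of the block. */
static void idct32x32_dc_only_add(const int16_t *input, uint8_t *dest,
                                  int dest_stride) {
  int i, j, a1;
  int16_t out = dct_const_round_shift(input[0] * cospi_16_64);  /* rows */
  out = dct_const_round_shift(out * cospi_16_64);               /* cols */
  a1 = ROUND_POWER_OF_TWO(out, 6);
  for (j = 0; j < 32; ++j) {
    for (i = 0; i < 32; ++i)
      dest[i] = clip_pixel(dest[i] + a1);
    dest += dest_stride;
  }
}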
@@ -1269,8 +1269,18 @@ void vp9_short_idct32x32_add_c(int16_t *input, uint8_t *dest, int dest_stride) {
   }
 }
 
-void vp9_short_idct1_32x32_c(int16_t *input, int16_t *output) {
+void vp9_short_idct32x32_1_add_c(int16_t *input, uint8_t *dest,
+                                 int dest_stride) {
+  int i, j;
+  int a1;
+
   int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
   out = dct_const_round_shift(out * cospi_16_64);
-  output[0] = ROUND_POWER_OF_TWO(out, 6);
+  a1 = ROUND_POWER_OF_TWO(out, 6);
+
+  for (j = 0; j < 32; ++j) {
+    for (i = 0; i < 32; ++i)
+      dest[i] = clip_pixel(dest[i] + a1);
+    dest += dest_stride;
+  }
 }
@@ -202,17 +202,6 @@ specialize vp9_dc_left_predictor_32x32
 prototype void vp9_dc_128_predictor_32x32 "uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left"
 specialize vp9_dc_128_predictor_32x32
 
-if [ "$CONFIG_VP9_DECODER" = "yes" ]; then
-prototype void vp9_add_constant_residual_8x8 "const int16_t diff, uint8_t *dest, int stride"
-specialize vp9_add_constant_residual_8x8 sse2 neon
-
-prototype void vp9_add_constant_residual_16x16 "const int16_t diff, uint8_t *dest, int stride"
-specialize vp9_add_constant_residual_16x16 sse2 neon
-
-prototype void vp9_add_constant_residual_32x32 "const int16_t diff, uint8_t *dest, int stride"
-specialize vp9_add_constant_residual_32x32 sse2 neon
-fi
-
 #
 # Loopfilter
 #
@@ -321,8 +310,8 @@ specialize vp9_short_idct16x16_10_add sse2 neon
 prototype void vp9_short_idct32x32_add "int16_t *input, uint8_t *dest, int dest_stride"
 specialize vp9_short_idct32x32_add sse2 neon
 
-prototype void vp9_short_idct1_32x32 "int16_t *input, int16_t *output"
-specialize vp9_short_idct1_32x32
+prototype void vp9_short_idct32x32_1_add "int16_t *input, uint8_t *dest, int dest_stride"
+specialize vp9_short_idct32x32_1_add
 
 prototype void vp9_short_iht4x4_add "int16_t *input, uint8_t *dest, int dest_stride, int tx_type"
 specialize vp9_short_iht4x4_add sse2 neon
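The prototype/specialize pairs above drive libvpx's run-time CPU detection (rtcd) code generation. Roughly, and as an assumption about the generated header's shape rather than something shown in this diff: an unspecialized function resolves straight to its C implementation, while one specialized for sse2/neon becomes a pointer selected at rtcd initialization.

/* Assumed shape of the generated vp9_rtcd.h glue (illustrative only). */
#include <stdint.h>

void vp9_short_idct32x32_1_add_c(int16_t *input, uint8_t *dest,
                                 int dest_stride);

/* "specialize vp9_short_idct32x32_1_add" with no SIMD suffixes: the
 * dispatch name is simply an alias for the C version. */
#define vp9_short_idct32x32_1_add vp9_short_idct32x32_1_add_c

/* "specialize vp9_short_idct32x32_add sse2 neon": a function pointer that
 * rtcd setup points at the best implementation for the running CPU. */
extern void (*vp9_short_idct32x32_add)(int16_t *input, uint8_t *dest,
                                       int dest_stride);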
......
;
; Copyright (c) 2013 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
    EXPORT  |vp9_add_constant_residual_8x8_neon|
    EXPORT  |vp9_add_constant_residual_16x16_neon|
    EXPORT  |vp9_add_constant_residual_32x32_neon|
    ARM

    AREA ||.text||, CODE, READONLY, ALIGN=2

    ; Load 8 rows of 16 bytes from $src (post-incremented by $stride)
    ; into q8-q15.
    MACRO
    LD_16x8 $src, $stride
    vld1.8  {q8},  [$src], $stride
    vld1.8  {q9},  [$src], $stride
    vld1.8  {q10}, [$src], $stride
    vld1.8  {q11}, [$src], $stride
    vld1.8  {q12}, [$src], $stride
    vld1.8  {q13}, [$src], $stride
    vld1.8  {q14}, [$src], $stride
    vld1.8  {q15}, [$src], $stride
    MEND

    ; Unsigned saturating add of $diff to every byte of q8-q15.
    MACRO
    ADD_DIFF_16x8 $diff
    vqadd.u8 q8,  q8,  $diff
    vqadd.u8 q9,  q9,  $diff
    vqadd.u8 q10, q10, $diff
    vqadd.u8 q11, q11, $diff
    vqadd.u8 q12, q12, $diff
    vqadd.u8 q13, q13, $diff
    vqadd.u8 q14, q14, $diff
    vqadd.u8 q15, q15, $diff
    MEND

    ; Unsigned saturating subtract of $diff from every byte of q8-q15.
    MACRO
    SUB_DIFF_16x8 $diff
    vqsub.u8 q8,  q8,  $diff
    vqsub.u8 q9,  q9,  $diff
    vqsub.u8 q10, q10, $diff
    vqsub.u8 q11, q11, $diff
    vqsub.u8 q12, q12, $diff
    vqsub.u8 q13, q13, $diff
    vqsub.u8 q14, q14, $diff
    vqsub.u8 q15, q15, $diff
    MEND

    ; Store q8-q15 as 8 rows of 16 bytes at $dst (post-incremented by $stride).
    MACRO
    ST_16x8 $dst, $stride
    vst1.8  {q8},  [$dst], $stride
    vst1.8  {q9},  [$dst], $stride
    vst1.8  {q10}, [$dst], $stride
    vst1.8  {q11}, [$dst], $stride
    vst1.8  {q12}, [$dst], $stride
    vst1.8  {q13}, [$dst], $stride
    vst1.8  {q14}, [$dst], $stride
    vst1.8  {q15}, [$dst], $stride
    MEND
; void add_constant_residual(const int16_t diff, uint8_t *dest, int stride,
; int width, int height) {
; int r, c;
;
; for (r = 0; r < height; r++) {
; for (c = 0; c < width; c++)
; dest[c] = clip_pixel(diff + dest[c]);
;
; dest += stride;
; }
;}
;void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest,
; int stride) {
; add_constant_residual(diff, dest, stride, 8, 8);
;}
; r0 : const int16_t diff
; r1 : const uint8_t *dest
; r2 : int stride
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|vp9_add_constant_residual_8x8_neon| PROC
    mov      r3, r1                     ; r3: save dest to r3
    vld1.8   {d0}, [r1], r2
    vld1.8   {d1}, [r1], r2
    vld1.8   {d2}, [r1], r2
    vld1.8   {d3}, [r1], r2
    vld1.8   {d4}, [r1], r2
    vld1.8   {d5}, [r1], r2
    vld1.8   {d6}, [r1], r2
    vld1.8   {d7}, [r1], r2
    cmp      r0, #0
    bge      DIFF_POSITIVE_8x8

DIFF_NEGATIVE_8x8                       ; diff < 0
    neg      r0, r0
    usat     r0, #8, r0
    vdup.u8  q8, r0

    vqsub.u8 q0, q0, q8
    vqsub.u8 q1, q1, q8
    vqsub.u8 q2, q2, q8
    vqsub.u8 q3, q3, q8
    b        DIFF_SAVE_8x8

DIFF_POSITIVE_8x8                       ; diff >= 0
    usat     r0, #8, r0
    vdup.u8  q8, r0

    vqadd.u8 q0, q0, q8
    vqadd.u8 q1, q1, q8
    vqadd.u8 q2, q2, q8
    vqadd.u8 q3, q3, q8

DIFF_SAVE_8x8
    vst1.8   {d0}, [r3], r2
    vst1.8   {d1}, [r3], r2
    vst1.8   {d2}, [r3], r2
    vst1.8   {d3}, [r3], r2
    vst1.8   {d4}, [r3], r2
    vst1.8   {d5}, [r3], r2
    vst1.8   {d6}, [r3], r2
    vst1.8   {d7}, [r3], r2

    bx       lr
    ENDP
;void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest,
; int stride) {
; add_constant_residual(diff, dest, stride, 16, 16);
;}
; r0 : const int16_t diff
; r1 : const uint8_t *dest
; r2 : int stride
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|vp9_add_constant_residual_16x16_neon| PROC
    mov      r3, r1
    LD_16x8  r1, r2
    cmp      r0, #0
    bge      DIFF_POSITIVE_16x16

|DIFF_NEGATIVE_16x16|
    neg      r0, r0
    usat     r0, #8, r0
    vdup.u8  q0, r0

    SUB_DIFF_16x8 q0
    ST_16x8  r3, r2
    LD_16x8  r1, r2
    SUB_DIFF_16x8 q0
    b        DIFF_SAVE_16x16

|DIFF_POSITIVE_16x16|
    usat     r0, #8, r0
    vdup.u8  q0, r0

    ADD_DIFF_16x8 q0
    ST_16x8  r3, r2
    LD_16x8  r1, r2
    ADD_DIFF_16x8 q0

|DIFF_SAVE_16x16|
    ST_16x8  r3, r2
    bx       lr
    ENDP
;void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest,
; int stride) {
; add_constant_residual(diff, dest, stride, 32, 32);
;}
; r0 : const int16_t diff
; r1 : const uint8_t *dest
; r2 : int stride
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
|vp9_add_constant_residual_32x32_neon| PROC
    push     {r4,lr}
    pld      [r1]
    mov      r3, r1
    add      r4, r1, #16                ; r4: dest + 16, start of the right half
    cmp      r0, #0
    bge      DIFF_POSITIVE_32x32

|DIFF_NEGATIVE_32x32|
    neg      r0, r0
    usat     r0, #8, r0
    vdup.u8  q0, r0
    mov      r0, #4                     ; 4 passes of 16 rows x 16 bytes

|DIFF_NEGATIVE_32x32_LOOP|
    sub      r0, #1
    LD_16x8  r1, r2
    SUB_DIFF_16x8 q0
    ST_16x8  r3, r2
    LD_16x8  r1, r2
    SUB_DIFF_16x8 q0
    ST_16x8  r3, r2
    cmp      r0, #2                     ; left 16x32 half done after 2 passes:
    moveq    r1, r4                     ; switch the load/store pointers to
    moveq    r3, r4                     ; the right half at dest + 16
    cmp      r0, #0
    bne      DIFF_NEGATIVE_32x32_LOOP
    pop      {r4,pc}

|DIFF_POSITIVE_32x32|
    usat     r0, #8, r0
    vdup.u8  q0, r0
    mov      r0, #4                     ; 4 passes of 16 rows x 16 bytes

|DIFF_POSITIVE_32x32_LOOP|
    sub      r0, #1
    LD_16x8  r1, r2
    ADD_DIFF_16x8 q0
    ST_16x8  r3, r2
    LD_16x8  r1, r2
    ADD_DIFF_16x8 q0
    ST_16x8  r3, r2
    cmp      r0, #2                     ; left 16x32 half done after 2 passes:
    moveq    r1, r4                     ; switch the load/store pointers to
    moveq    r3, r4                     ; the right half at dest + 16
    cmp      r0, #0
    bne      DIFF_POSITIVE_32x32_LOOP
    pop      {r4,pc}
    ENDP
END
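Both the NEON file above and the SSE2 file further below lean on the same trick: clip_pixel(dest + diff) over bytes is computed with unsigned saturating arithmetic on |diff| pre-clamped to 255. A scalar model of that trick for illustration (the helper name is mine, not libvpx's):

#include <stdint.h>

/* Scalar equivalent of clip_pixel(pixel + diff) for pixel in [0, 255],
 * written the way the SIMD code does it. */
static uint8_t add_residual_saturating(uint8_t pixel, int16_t diff) {
  if (diff >= 0) {
    uint8_t d = (diff > 255) ? 255 : (uint8_t)diff;    /* usat / clamp */
    unsigned sum = (unsigned)pixel + d;                /* vqadd.u8 / _mm_adds_epu8 */
    return (sum > 255) ? 255 : (uint8_t)sum;
  } else {
    uint8_t d = (diff < -255) ? 255 : (uint8_t)-diff;  /* neg + usat / clamp */
    return (pixel > d) ? (uint8_t)(pixel - d) : 0;     /* vqsub.u8 / _mm_subs_epu8 */
  }
}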
@@ -12,33 +12,6 @@
 #include "vp9/common/vp9_blockd.h"
 #include "vp9/decoder/vp9_idct_blk.h"
 
-static void add_constant_residual(const int16_t diff, uint8_t *dest, int stride,
-                                  int width, int height) {
-  int r, c;
-
-  for (r = 0; r < height; r++) {
-    for (c = 0; c < width; c++)
-      dest[c] = clip_pixel(diff + dest[c]);
-
-    dest += stride;
-  }
-}
-
-void vp9_add_constant_residual_8x8_c(const int16_t diff, uint8_t *dest,
-                                     int stride) {
-  add_constant_residual(diff, dest, stride, 8, 8);
-}
-
-void vp9_add_constant_residual_16x16_c(const int16_t diff, uint8_t *dest,
-                                       int stride) {
-  add_constant_residual(diff, dest, stride, 16, 16);
-}
-
-void vp9_add_constant_residual_32x32_c(const int16_t diff, uint8_t *dest,
-                                       int stride) {
-  add_constant_residual(diff, dest, stride, 32, 32);
-}
-
 void vp9_iht_add_c(TX_TYPE tx_type, int16_t *input, uint8_t *dest, int stride,
                    int eob) {
   if (tx_type == DCT_DCT) {
@@ -136,12 +109,9 @@ void vp9_idct_add_16x16_c(int16_t *input, uint8_t *dest, int stride, int eob) {
 }
 
 void vp9_idct_add_32x32_c(int16_t *input, uint8_t *dest, int stride, int eob) {
-  DECLARE_ALIGNED_ARRAY(16, int16_t, output, 1024);
-
   if (eob) {
     if (eob == 1) {
-      vp9_short_idct1_32x32(input, output);
-      vp9_add_constant_residual_32x32(output[0], dest, stride);
+      vp9_short_idct32x32_1_add(input, dest, stride);
       input[0] = 0;
     } else {
       vp9_short_idct32x32_add(input, dest, stride);
......
/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <assert.h>
#include <emmintrin.h> // SSE2
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_idct.h"
void vp9_add_constant_residual_8x8_sse2(const int16_t diff, uint8_t *dest,
                                        int stride) {
  uint8_t abs_diff;
  __m128i d;

  // Prediction data.
  __m128i p0 = _mm_loadl_epi64((const __m128i *)(dest + 0 * stride));
  __m128i p1 = _mm_loadl_epi64((const __m128i *)(dest + 1 * stride));
  __m128i p2 = _mm_loadl_epi64((const __m128i *)(dest + 2 * stride));
  __m128i p3 = _mm_loadl_epi64((const __m128i *)(dest + 3 * stride));
  __m128i p4 = _mm_loadl_epi64((const __m128i *)(dest + 4 * stride));
  __m128i p5 = _mm_loadl_epi64((const __m128i *)(dest + 5 * stride));
  __m128i p6 = _mm_loadl_epi64((const __m128i *)(dest + 6 * stride));
  __m128i p7 = _mm_loadl_epi64((const __m128i *)(dest + 7 * stride));

  p0 = _mm_unpacklo_epi64(p0, p1);
  p2 = _mm_unpacklo_epi64(p2, p3);
  p4 = _mm_unpacklo_epi64(p4, p5);
  p6 = _mm_unpacklo_epi64(p6, p7);

  // Clip diff value to [0, 255] range. Then, do addition or subtraction
  // according to its sign.
  if (diff >= 0) {
    abs_diff = (diff > 255) ? 255 : diff;
    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
    p0 = _mm_adds_epu8(p0, d);
    p2 = _mm_adds_epu8(p2, d);
    p4 = _mm_adds_epu8(p4, d);
    p6 = _mm_adds_epu8(p6, d);
  } else {
    abs_diff = (diff < -255) ? 255 : -diff;
    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
    p0 = _mm_subs_epu8(p0, d);
    p2 = _mm_subs_epu8(p2, d);
    p4 = _mm_subs_epu8(p4, d);
    p6 = _mm_subs_epu8(p6, d);
  }

  _mm_storel_epi64((__m128i *)(dest + 0 * stride), p0);
  p0 = _mm_srli_si128(p0, 8);
  _mm_storel_epi64((__m128i *)(dest + 1 * stride), p0);

  _mm_storel_epi64((__m128i *)(dest + 2 * stride), p2);
  p2 = _mm_srli_si128(p2, 8);
  _mm_storel_epi64((__m128i *)(dest + 3 * stride), p2);

  _mm_storel_epi64((__m128i *)(dest + 4 * stride), p4);
  p4 = _mm_srli_si128(p4, 8);
  _mm_storel_epi64((__m128i *)(dest + 5 * stride), p4);

  _mm_storel_epi64((__m128i *)(dest + 6 * stride), p6);
  p6 = _mm_srli_si128(p6, 8);
  _mm_storel_epi64((__m128i *)(dest + 7 * stride), p6);
}
void vp9_add_constant_residual_16x16_sse2(const int16_t diff, uint8_t *dest,
                                          int stride) {
  uint8_t abs_diff;
  __m128i d;

  // Prediction data.
  __m128i p0 = _mm_load_si128((const __m128i *)(dest + 0 * stride));
  __m128i p1 = _mm_load_si128((const __m128i *)(dest + 1 * stride));
  __m128i p2 = _mm_load_si128((const __m128i *)(dest + 2 * stride));
  __m128i p3 = _mm_load_si128((const __m128i *)(dest + 3 * stride));
  __m128i p4 = _mm_load_si128((const __m128i *)(dest + 4 * stride));
  __m128i p5 = _mm_load_si128((const __m128i *)(dest + 5 * stride));
  __m128i p6 = _mm_load_si128((const __m128i *)(dest + 6 * stride));
  __m128i p7 = _mm_load_si128((const __m128i *)(dest + 7 * stride));
  __m128i p8 = _mm_load_si128((const __m128i *)(dest + 8 * stride));
  __m128i p9 = _mm_load_si128((const __m128i *)(dest + 9 * stride));
  __m128i p10 = _mm_load_si128((const __m128i *)(dest + 10 * stride));
  __m128i p11 = _mm_load_si128((const __m128i *)(dest + 11 * stride));
  __m128i p12 = _mm_load_si128((const __m128i *)(dest + 12 * stride));
  __m128i p13 = _mm_load_si128((const __m128i *)(dest + 13 * stride));
  __m128i p14 = _mm_load_si128((const __m128i *)(dest + 14 * stride));
  __m128i p15 = _mm_load_si128((const __m128i *)(dest + 15 * stride));

  // Clip diff value to [0, 255] range. Then, do addition or subtraction
  // according to its sign.
  if (diff >= 0) {
    abs_diff = (diff > 255) ? 255 : diff;
    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
    p0 = _mm_adds_epu8(p0, d);
    p1 = _mm_adds_epu8(p1, d);
    p2 = _mm_adds_epu8(p2, d);
    p3 = _mm_adds_epu8(p3, d);
    p4 = _mm_adds_epu8(p4, d);
    p5 = _mm_adds_epu8(p5, d);
    p6 = _mm_adds_epu8(p6, d);
    p7 = _mm_adds_epu8(p7, d);
    p8 = _mm_adds_epu8(p8, d);
    p9 = _mm_adds_epu8(p9, d);
    p10 = _mm_adds_epu8(p10, d);
    p11 = _mm_adds_epu8(p11, d);
    p12 = _mm_adds_epu8(p12, d);
    p13 = _mm_adds_epu8(p13, d);
    p14 = _mm_adds_epu8(p14, d);
    p15 = _mm_adds_epu8(p15, d);
  } else {
    abs_diff = (diff < -255) ? 255 : -diff;
    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
    p0 = _mm_subs_epu8(p0, d);
    p1 = _mm_subs_epu8(p1, d);
    p2 = _mm_subs_epu8(p2, d);
    p3 = _mm_subs_epu8(p3, d);
    p4 = _mm_subs_epu8(p4, d);
    p5 = _mm_subs_epu8(p5, d);
    p6 = _mm_subs_epu8(p6, d);
    p7 = _mm_subs_epu8(p7, d);
    p8 = _mm_subs_epu8(p8, d);
    p9 = _mm_subs_epu8(p9, d);
    p10 = _mm_subs_epu8(p10, d);
    p11 = _mm_subs_epu8(p11, d);
    p12 = _mm_subs_epu8(p12, d);
    p13 = _mm_subs_epu8(p13, d);
    p14 = _mm_subs_epu8(p14, d);
    p15 = _mm_subs_epu8(p15, d);
  }

  // Store results
  _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
  _mm_store_si128((__m128i *)(dest + 1 * stride), p1);
  _mm_store_si128((__m128i *)(dest + 2 * stride), p2);
  _mm_store_si128((__m128i *)(dest + 3 * stride), p3);
  _mm_store_si128((__m128i *)(dest + 4 * stride), p4);
  _mm_store_si128((__m128i *)(dest + 5 * stride), p5);
  _mm_store_si128((__m128i *)(dest + 6 * stride), p6);
  _mm_store_si128((__m128i *)(dest + 7 * stride), p7);
  _mm_store_si128((__m128i *)(dest + 8 * stride), p8);
  _mm_store_si128((__m128i *)(dest + 9 * stride), p9);
  _mm_store_si128((__m128i *)(dest + 10 * stride), p10);
  _mm_store_si128((__m128i *)(dest + 11 * stride), p11);
  _mm_store_si128((__m128i *)(dest + 12 * stride), p12);
  _mm_store_si128((__m128i *)(dest + 13 * stride), p13);
  _mm_store_si128((__m128i *)(dest + 14 * stride), p14);
  _mm_store_si128((__m128i *)(dest + 15 * stride), p15);
}
void vp9_add_constant_residual_32x32_sse2(const int16_t diff, uint8_t *dest,
                                          int stride) {
  uint8_t abs_diff;
  __m128i d;
  int i = 8;  // 8 iterations x 4 rows per iteration = 32 rows

  // Clip diff value to [0, 255] range and broadcast it to all 16 byte lanes
  // once, outside the loop.
  if (diff >= 0) {
    abs_diff = (diff > 255) ? 255 : diff;
    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
  } else {
    abs_diff = (diff < -255) ? 255 : -diff;
    d = _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(abs_diff * 0x01010101u)), 0);
  }

  do {
    // Prediction data: 4 rows of 32 bytes (two 16-byte loads per row).
    __m128i p0 = _mm_load_si128((const __m128i *)(dest + 0 * stride));
    __m128i p1 = _mm_load_si128((const __m128i *)(dest + 0 * stride + 16));
    __m128i p2 = _mm_load_si128((const __m128i *)(dest + 1 * stride));
    __m128i p3 = _mm_load_si128((const __m128i *)(dest + 1 * stride + 16));
    __m128i p4 = _mm_load_si128((const __m128i *)(dest + 2 * stride));
    __m128i p5 = _mm_load_si128((const __m128i *)(dest + 2 * stride + 16));
    __m128i p6 = _mm_load_si128((const __m128i *)(dest + 3 * stride));
    __m128i p7 = _mm_load_si128((const __m128i *)(dest + 3 * stride + 16));

    // Do addition or subtraction according to the sign of diff.
    if (diff >= 0) {
      p0 = _mm_adds_epu8(p0, d);
      p1 = _mm_adds_epu8(p1, d);
      p2 = _mm_adds_epu8(p2, d);
      p3 = _mm_adds_epu8(p3, d);
      p4 = _mm_adds_epu8(p4, d);
      p5 = _mm_adds_epu8(p5, d);
      p6 = _mm_adds_epu8(p6, d);
      p7 = _mm_adds_epu8(p7, d);
    } else {
      p0 = _mm_subs_epu8(p0, d);
      p1 = _mm_subs_epu8(p1, d);
      p2 = _mm_subs_epu8(p2, d);
      p3 = _mm_subs_epu8(p3, d);
      p4 = _mm_subs_epu8(p4, d);
      p5 = _mm_subs_epu8(p5, d);
      p6 = _mm_subs_epu8(p6, d);
      p7 = _mm_subs_epu8(p7, d);
    }

    // Store results
    _mm_store_si128((__m128i *)(dest + 0 * stride), p0);
    _mm_store_si128((__m128i *)(dest + 0 * stride + 16), p1);
    _mm_store_si128((__m128i *)(dest + 1 * stride), p2);
    _mm_store_si128((__m128i *)(dest + 1 * stride + 16), p3);
    _mm_store_si128((__m128i *)(dest + 2 * stride), p4);
    _mm_store_si128((__m128i *)(dest + 2 * stride + 16), p5);
    _mm_store_si128((__m128i *)(dest + 3 * stride), p6);
    _mm_store_si128((__m128i *)(dest + 3 * stride + 16), p7);

    dest += 4 * stride;
  } while (--i);
}
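One idiom in the file above is worth calling out: multiplying a byte by 0x01010101 replicates it into all four bytes of a 32-bit value, and _mm_shuffle_epi32(v, 0) then copies that lane across the register, yielding sixteen copies of abs_diff with plain SSE2 (no SSSE3 _mm_shuffle_epi8 needed). As an isolated sketch, with a helper name of my own:

#include <emmintrin.h>  // SSE2
#include <stdint.h>

// Broadcast one byte to all 16 lanes of an XMM register using SSE2 only.
static __m128i broadcast_byte_sse2(uint8_t b) {
  return _mm_shuffle_epi32(_mm_cvtsi32_si128((int)(b * 0x01010101u)), 0);
}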
@@ -38,6 +38,3 @@ VP9_DX_SRCS-yes += decoder/vp9_dsubexp.c
 VP9_DX_SRCS-yes += decoder/vp9_dsubexp.h
 
 VP9_DX_SRCS-yes := $(filter-out $(VP9_DX_SRCS_REMOVE-yes),$(VP9_DX_SRCS-yes))
-
-VP9_DX_SRCS-$(HAVE_SSE2) += decoder/x86/vp9_dequantize_sse2.c
-VP9_DX_SRCS-$(HAVE_NEON) += decoder/arm/neon/vp9_add_constant_residual_neon$(ASM)