Commit 09eea219 authored by Urvang Joshi

Fix warnings reported by -Wshadow: Part 1: aom_dsp directory

While we are at it:
- Rename some variables to more meaningful names
- Reuse some common consts from a header instead of redefining them.

Change-Id: I75c4248cb75aa54c52111686f139b096dc119328
parent 7560123c
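
For readers unfamiliar with the warning: -Wshadow fires whenever a declaration in an inner scope reuses a name that is already visible from an enclosing scope (a function parameter, a file-scope constant, and so on), which makes it easy to operate on the wrong object. Below is a minimal sketch of the pattern this series removes; the function and variable names are illustrative only, not code from the repository, and both declarations inside the loop are reported by gcc's -Wshadow:

  /* shadow_demo.c -- compile with: gcc -Wshadow -c shadow_demo.c */
  static const int cospi_16_64 = 11585; /* shared constant, e.g. from a common header */

  void transform_like(const short *input, int *output, int stride) {
    int pass;
    output[0] = input[0] * cospi_16_64; /* uses the file-scope constant */
    for (pass = 0; pass < 2; ++pass) {
      int input[4];            /* -Wshadow: hides the 'input' parameter */
      int cospi_16_64 = 11585; /* -Wshadow: hides the file-scope constant */
      input[0] = pass * stride;
      output[pass + 1] = input[0] * cospi_16_64; /* silently picks the inner copies */
    }
  }

Renaming the locals (as the fwd_txfm.c hunk at the end does with in_high) and dropping the redundant local constants in favor of the shared ones removes both warnings without changing behavior.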
@@ -53,10 +53,10 @@ void aom_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
   v_t2_hi = vmlal_n_s16(v_t2_hi, vget_high_s16(v_x3), (int16_t)cospi_8_64);
   v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x2), (int16_t)cospi_8_64);
   v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x2), (int16_t)cospi_8_64);
-  v_t0_lo = vmulq_n_s32(v_t0_lo, cospi_16_64);
-  v_t0_hi = vmulq_n_s32(v_t0_hi, cospi_16_64);
-  v_t1_lo = vmulq_n_s32(v_t1_lo, cospi_16_64);
-  v_t1_hi = vmulq_n_s32(v_t1_hi, cospi_16_64);
+  v_t0_lo = vmulq_n_s32(v_t0_lo, (int32_t)cospi_16_64);
+  v_t0_hi = vmulq_n_s32(v_t0_hi, (int32_t)cospi_16_64);
+  v_t1_lo = vmulq_n_s32(v_t1_lo, (int32_t)cospi_16_64);
+  v_t1_hi = vmulq_n_s32(v_t1_hi, (int32_t)cospi_16_64);
   {
     const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
     const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
...
@@ -20,7 +20,7 @@ void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
   int16x8_t q0s16;
   uint8_t *d1, *d2;
-  int16_t i, j, a1, cospi_16_64 = 11585;
+  int16_t i, j, a1;
   int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
   out = dct_const_round_shift(out * cospi_16_64);
   a1 = ROUND_POWER_OF_TWO(out, 6);
...
@@ -137,8 +137,8 @@ void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
   d31s16 = vget_high_s16(q15s16);

   // stage 3
-  d0s16 = vdup_n_s16(cospi_28_64);
-  d1s16 = vdup_n_s16(cospi_4_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_28_64);
+  d1s16 = vdup_n_s16((int16_t)cospi_4_64);

   q2s32 = vmull_s16(d18s16, d0s16);
   q3s32 = vmull_s16(d19s16, d0s16);
@@ -150,8 +150,8 @@ void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
   q5s32 = vmlal_s16(q5s32, d30s16, d0s16);
   q6s32 = vmlal_s16(q6s32, d31s16, d0s16);

-  d2s16 = vdup_n_s16(cospi_12_64);
-  d3s16 = vdup_n_s16(cospi_20_64);
+  d2s16 = vdup_n_s16((int16_t)cospi_12_64);
+  d3s16 = vdup_n_s16((int16_t)cospi_20_64);

   d8s16 = vqrshrn_n_s32(q2s32, 14);
   d9s16 = vqrshrn_n_s32(q3s32, 14);
@@ -178,15 +178,15 @@ void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
   q6s16 = vcombine_s16(d12s16, d13s16);

   // stage 4
-  d30s16 = vdup_n_s16(cospi_16_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_16_64);

   q2s32 = vmull_s16(d16s16, d30s16);
   q11s32 = vmull_s16(d17s16, d30s16);
   q0s32 = vmull_s16(d24s16, d30s16);
   q1s32 = vmull_s16(d25s16, d30s16);

-  d30s16 = vdup_n_s16(cospi_24_64);
-  d31s16 = vdup_n_s16(cospi_8_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_24_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_8_64);

   q3s32 = vaddq_s32(q2s32, q0s32);
   q12s32 = vaddq_s32(q11s32, q1s32);
@@ -232,7 +232,7 @@ void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out,
   q2s16 = vsubq_s16(q9s16, q10s16);
   q3s16 = vsubq_s16(q8s16, q11s16);

-  d16s16 = vdup_n_s16(cospi_16_64);
+  d16s16 = vdup_n_s16((int16_t)cospi_16_64);

   q11s32 = vmull_s16(d26s16, d16s16);
   q12s32 = vmull_s16(d27s16, d16s16);
@@ -378,8 +378,8 @@ void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
   d31s16 = vget_high_s16(q15s16);

   // stage 3
-  d12s16 = vdup_n_s16(cospi_30_64);
-  d13s16 = vdup_n_s16(cospi_2_64);
+  d12s16 = vdup_n_s16((int16_t)cospi_30_64);
+  d13s16 = vdup_n_s16((int16_t)cospi_2_64);

   q2s32 = vmull_s16(d16s16, d12s16);
   q3s32 = vmull_s16(d17s16, d12s16);
@@ -398,8 +398,8 @@ void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
   q0s16 = vcombine_s16(d0s16, d1s16);
   q7s16 = vcombine_s16(d14s16, d15s16);

-  d30s16 = vdup_n_s16(cospi_14_64);
-  d31s16 = vdup_n_s16(cospi_18_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_14_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_18_64);

   q2s32 = vmull_s16(d24s16, d30s16);
   q3s32 = vmull_s16(d25s16, d30s16);
@@ -418,8 +418,8 @@ void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
   q1s16 = vcombine_s16(d2s16, d3s16);
   q6s16 = vcombine_s16(d12s16, d13s16);

-  d30s16 = vdup_n_s16(cospi_22_64);
-  d31s16 = vdup_n_s16(cospi_10_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_22_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_10_64);

   q11s32 = vmull_s16(d20s16, d30s16);
   q12s32 = vmull_s16(d21s16, d30s16);
@@ -438,8 +438,8 @@ void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
   q2s16 = vcombine_s16(d4s16, d5s16);
   q5s16 = vcombine_s16(d10s16, d11s16);

-  d30s16 = vdup_n_s16(cospi_6_64);
-  d31s16 = vdup_n_s16(cospi_26_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_6_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_26_64);

   q10s32 = vmull_s16(d28s16, d30s16);
   q11s32 = vmull_s16(d29s16, d30s16);
@@ -478,8 +478,8 @@ void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
   d28s16 = vget_low_s16(q14s16);
   d29s16 = vget_high_s16(q14s16);

-  d30s16 = vdup_n_s16(cospi_8_64);
-  d31s16 = vdup_n_s16(cospi_24_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_8_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_24_64);

   q2s32 = vmull_s16(d18s16, d31s16);
   q3s32 = vmull_s16(d19s16, d31s16);
@@ -539,7 +539,7 @@ void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out,
   d26s16 = vget_low_s16(q13s16);
   d27s16 = vget_high_s16(q13s16);

-  d14s16 = vdup_n_s16(cospi_16_64);
+  d14s16 = vdup_n_s16((int16_t)cospi_16_64);

   q3s32 = vmull_s16(d26s16, d14s16);
   q4s32 = vmull_s16(d27s16, d14s16);
@@ -903,15 +903,15 @@ void aom_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out,
                        &q15s16);

   // stage 3
-  q0s16 = vdupq_n_s16(cospi_28_64 * 2);
-  q1s16 = vdupq_n_s16(cospi_4_64 * 2);
+  q0s16 = vdupq_n_s16((int16_t)(cospi_28_64 * 2));
+  q1s16 = vdupq_n_s16((int16_t)(cospi_4_64 * 2));

   q4s16 = vqrdmulhq_s16(q9s16, q0s16);
   q7s16 = vqrdmulhq_s16(q9s16, q1s16);

   // stage 4
-  q1s16 = vdupq_n_s16(cospi_16_64 * 2);
-  d4s16 = vdup_n_s16(cospi_16_64);
+  q1s16 = vdupq_n_s16((int16_t)(cospi_16_64 * 2));
+  d4s16 = vdup_n_s16((int16_t)cospi_16_64);

   q8s16 = vqrdmulhq_s16(q8s16, q1s16);
@@ -1046,13 +1046,13 @@ void aom_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
                        &q15s16);

   // stage 3
-  q6s16 = vdupq_n_s16(cospi_30_64 * 2);
+  q6s16 = vdupq_n_s16((int16_t)(cospi_30_64 * 2));
   q0s16 = vqrdmulhq_s16(q8s16, q6s16);
-  q6s16 = vdupq_n_s16(cospi_2_64 * 2);
+  q6s16 = vdupq_n_s16((int16_t)(cospi_2_64 * 2));
   q7s16 = vqrdmulhq_s16(q8s16, q6s16);

   q15s16 = vdupq_n_s16(-cospi_26_64 * 2);
-  q14s16 = vdupq_n_s16(cospi_6_64 * 2);
+  q14s16 = vdupq_n_s16((int16_t)(cospi_6_64 * 2));
   q3s16 = vqrdmulhq_s16(q9s16, q15s16);
   q4s16 = vqrdmulhq_s16(q9s16, q14s16);
@@ -1066,8 +1066,8 @@ void aom_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
   d14s16 = vget_low_s16(q7s16);
   d15s16 = vget_high_s16(q7s16);

-  d30s16 = vdup_n_s16(cospi_8_64);
-  d31s16 = vdup_n_s16(cospi_24_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_8_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_24_64);

   q12s32 = vmull_s16(d14s16, d31s16);
   q5s32 = vmull_s16(d15s16, d31s16);
@@ -1124,7 +1124,7 @@ void aom_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out,
   d26s16 = vget_low_s16(q13s16);
   d27s16 = vget_high_s16(q13s16);

-  d14s16 = vdup_n_s16(cospi_16_64);
+  d14s16 = vdup_n_s16((int16_t)cospi_16_64);
   q3s32 = vmull_s16(d26s16, d14s16);
   q4s32 = vmull_s16(d27s16, d14s16);
   q0s32 = vmull_s16(d20s16, d14s16);
...
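
A note on the arithmetic in these NEON hunks, for orientation: the cospi_k_64 constants are cos(k*pi/64) scaled to 14-bit fixed point (so cospi_16_64 = 11585 is roughly cos(pi/4) * 2^14), and the vmull_s16 / vqrshrn_n_s32(..., 14) pairs perform the same rounding multiply that the C code expresses as dct_const_round_shift in the 1_add hunks above. A scalar sketch of one lane of that operation, assuming the usual 14-bit DCT_CONST_BITS (the NEON vqrshrn variant additionally saturates, which is omitted here):

  #include <stdint.h>

  #define DCT_CONST_BITS 14 /* matches the shift used by vqrshrn_n_s32(..., 14) above */

  /* Scalar model of vmull_s16 followed by vqrshrn_n_s32(..., DCT_CONST_BITS). */
  static int16_t dct_mul_round(int16_t a, int16_t cospi) {
    const int32_t product = (int32_t)a * cospi;                   /* widen, like vmull_s16 */
    const int32_t rounded = product + (1 << (DCT_CONST_BITS - 1)); /* round half up */
    return (int16_t)(rounded >> DCT_CONST_BITS);                   /* narrow back to 16 bits */
  }

For example, dct_mul_round(64, 11585) evaluates to 45, i.e. 64 * cos(pi/4) rounded to the nearest integer.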
@@ -98,7 +98,7 @@ void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8;
   int i, j, dest_stride8;
   uint8_t *d;
-  int16_t a1, cospi_16_64 = 11585;
+  int16_t a1;
   int16_t out = dct_const_round_shift(input[0] * cospi_16_64);

   out = dct_const_round_shift(out * cospi_16_64);
...
@@ -20,7 +20,7 @@ void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint16x8_t q8u16;
   int16x8_t q0s16;
   uint8_t *d1, *d2;
-  int16_t i, a1, cospi_16_64 = 11585;
+  int16_t i, a1;
   int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
   out = dct_const_round_shift(out * cospi_16_64);
   a1 = ROUND_POWER_OF_TWO(out, 4);
...
@@ -11,6 +11,8 @@

 #include <arm_neon.h>

+#include "aom_dsp/txfm_common.h"
+
 void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint8x8_t d26u8, d27u8;
   uint32x2_t d26u32, d27u32;
@@ -22,9 +24,6 @@ void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   int16x4x2_t d0x2s16, d1x2s16;
   int32x4x2_t q0x2s32;
   uint8_t *d;
-  int16_t cospi_8_64 = 15137;
-  int16_t cospi_16_64 = 11585;
-  int16_t cospi_24_64 = 6270;

   d26u32 = d27u32 = vdup_n_u32(0);

@@ -41,8 +40,8 @@ void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   q8s16 = vcombine_s16(d0x2s16.val[0], d0x2s16.val[1]);
   q9s16 = vcombine_s16(d1x2s16.val[0], d1x2s16.val[1]);

-  d20s16 = vdup_n_s16(cospi_8_64);
-  d21s16 = vdup_n_s16(cospi_16_64);
+  d20s16 = vdup_n_s16((int16_t)cospi_8_64);
+  d21s16 = vdup_n_s16((int16_t)cospi_16_64);

   q0x2s32 =
       vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q9s16));
@@ -51,7 +50,7 @@ void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   d18s16 = vget_low_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));
   d19s16 = vget_high_s16(vreinterpretq_s16_s32(q0x2s32.val[1]));

-  d22s16 = vdup_n_s16(cospi_24_64);
+  d22s16 = vdup_n_s16((int16_t)cospi_24_64);

   // stage 1
   d23s16 = vadd_s16(d16s16, d18s16);
...
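
The idct4x4 hunks above are the "reuse some common consts from a header" part of the change: the three file-local int16_t copies of cospi_8_64, cospi_16_64 and cospi_24_64 are deleted and the file now picks the definitions up from aom_dsp/txfm_common.h. Because the shared constants are declared with a type wider than int16_t, handing one straight to a 16-bit NEON intrinsic needs an explicit narrowing cast, which is what the new (int16_t) casts do. A self-contained sketch of the pattern; the typedef below is an assumption standing in for the real header, while the values are the ones removed above:

  #include <arm_neon.h>
  #include <stdint.h>

  /* Stand-in for aom_dsp/txfm_common.h: cos(k*pi/64) scaled by 2^14, declared with a
   * wider integer type (the exact type here is an assumption for illustration). */
  typedef int64_t tran_high_t;
  static const tran_high_t cospi_8_64 = 15137;
  static const tran_high_t cospi_16_64 = 11585;
  static const tran_high_t cospi_24_64 = 6270;

  /* With the wider shared constant, splatting it into 16-bit lanes needs an explicit
   * cast, mirroring the (int16_t) casts added in the hunks above. */
  static int16x4_t dup_cospi_16_64(void) {
    return vdup_n_s16((int16_t)cospi_16_64);
  }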
@@ -20,7 +20,7 @@ void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16;
   int16x8_t q0s16;
   uint8_t *d1, *d2;
-  int16_t i, a1, cospi_16_64 = 11585;
+  int16_t i, a1;
   int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
   out = dct_const_round_shift(out * cospi_16_64);
   a1 = ROUND_POWER_OF_TWO(out, 5);
...
@@ -90,10 +90,10 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
   int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;

-  d0s16 = vdup_n_s16(cospi_28_64);
-  d1s16 = vdup_n_s16(cospi_4_64);
-  d2s16 = vdup_n_s16(cospi_12_64);
-  d3s16 = vdup_n_s16(cospi_20_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_28_64);
+  d1s16 = vdup_n_s16((int16_t)cospi_4_64);
+  d2s16 = vdup_n_s16((int16_t)cospi_12_64);
+  d3s16 = vdup_n_s16((int16_t)cospi_20_64);

   d16s16 = vget_low_s16(*q8s16);
   d17s16 = vget_high_s16(*q8s16);
@@ -146,7 +146,7 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   q6s16 = vcombine_s16(d12s16, d13s16);
   q7s16 = vcombine_s16(d14s16, d15s16);

-  d0s16 = vdup_n_s16(cospi_16_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_16_64);

   q2s32 = vmull_s16(d16s16, d0s16);
   q3s32 = vmull_s16(d17s16, d0s16);
@@ -158,8 +158,8 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
   q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);

-  d0s16 = vdup_n_s16(cospi_24_64);
-  d1s16 = vdup_n_s16(cospi_8_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_24_64);
+  d1s16 = vdup_n_s16((int16_t)cospi_8_64);

   d18s16 = vqrshrn_n_s32(q2s32, 14);
   d19s16 = vqrshrn_n_s32(q3s32, 14);
@@ -199,7 +199,7 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   d28s16 = vget_low_s16(*q14s16);
   d29s16 = vget_high_s16(*q14s16);

-  d16s16 = vdup_n_s16(cospi_16_64);
+  d16s16 = vdup_n_s16((int16_t)cospi_16_64);

   q9s32 = vmull_s16(d28s16, d16s16);
   q10s32 = vmull_s16(d29s16, d16s16);
@@ -356,29 +356,29 @@ void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   // First transform rows
   // stage 1
-  q0s16 = vdupq_n_s16(cospi_28_64 * 2);
-  q1s16 = vdupq_n_s16(cospi_4_64 * 2);
+  q0s16 = vdupq_n_s16((int16_t)cospi_28_64 * 2);
+  q1s16 = vdupq_n_s16((int16_t)cospi_4_64 * 2);

   q4s16 = vqrdmulhq_s16(q9s16, q0s16);

-  q0s16 = vdupq_n_s16(-cospi_20_64 * 2);
+  q0s16 = vdupq_n_s16(-(int16_t)cospi_20_64 * 2);

   q7s16 = vqrdmulhq_s16(q9s16, q1s16);

-  q1s16 = vdupq_n_s16(cospi_12_64 * 2);
+  q1s16 = vdupq_n_s16((int16_t)cospi_12_64 * 2);

   q5s16 = vqrdmulhq_s16(q11s16, q0s16);

-  q0s16 = vdupq_n_s16(cospi_16_64 * 2);
+  q0s16 = vdupq_n_s16((int16_t)cospi_16_64 * 2);

   q6s16 = vqrdmulhq_s16(q11s16, q1s16);

   // stage 2 & stage 3 - even half
-  q1s16 = vdupq_n_s16(cospi_24_64 * 2);
+  q1s16 = vdupq_n_s16((int16_t)cospi_24_64 * 2);

   q9s16 = vqrdmulhq_s16(q8s16, q0s16);

-  q0s16 = vdupq_n_s16(cospi_8_64 * 2);
+  q0s16 = vdupq_n_s16((int16_t)cospi_8_64 * 2);

   q13s16 = vqrdmulhq_s16(q10s16, q1s16);
@@ -400,7 +400,7 @@ void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) {
   d28s16 = vget_low_s16(q14s16);
   d29s16 = vget_high_s16(q14s16);

-  d16s16 = vdup_n_s16(cospi_16_64);
+  d16s16 = vdup_n_s16((int16_t)cospi_16_64);
   q9s32 = vmull_s16(d28s16, d16s16);
   q10s32 = vmull_s16(d29s16, d16s16);
   q11s32 = vmull_s16(d28s16, d16s16);
...
@@ -9,6 +9,7 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */

+#include <assert.h>
 #include "aom_dsp/fwd_txfm.h"

 void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
@@ -21,36 +22,37 @@ void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   int pass;
   // We need an intermediate buffer between passes.
   tran_low_t intermediate[4 * 4];
-  const int16_t *in_pass0 = input;
-  const tran_low_t *in = NULL;
+  const tran_low_t *in_low = NULL;
   tran_low_t *out = intermediate;
   // Do the two transform/transpose passes
   for (pass = 0; pass < 2; ++pass) {
-    tran_high_t input[4];      // canbe16
+    tran_high_t in_high[4];    // canbe16
     tran_high_t step[4];       // canbe16
     tran_high_t temp1, temp2;  // needs32
     int i;
     for (i = 0; i < 4; ++i) {
       // Load inputs.
-      if (0 == pass) {
-        input[0] = in_pass0[0 * stride] * 16;
-        input[1] = in_pass0[1 * stride] * 16;
-        input[2] = in_pass0[2 * stride] * 16;
-        input[3] = in_pass0[3 * stride] * 16;
-        if (i == 0 && input[0]) {
-          input[0] += 1;
+      if (pass == 0) {
+        in_high[0] = input[0 * stride] * 16;
+        in_high[1] = input[1 * stride] * 16;
+        in_high[2] = input[2 * stride] * 16;
+        in_high[3] = input[3 * stride] * 16;
+        if (i == 0 && in_high[0]) {
+          ++in_high[0];
         }
       } else {
-        input[0] = in[0 * 4];
-        input[1] = in[1 * 4];
-        input[2] = in[2 * 4];
-        input[3] = in[3 * 4];
+        assert(in_low != NULL);
+        in_high[0] = in_low[0 * 4];
+        in_high[1] = in_low[1 * 4];
+        in_high[2] = in_low[2 * 4];
+        in_high[3] = in_low[3 * 4];
+        ++in_low;
       }
       // Transform.
-      step[0] = input[0] + input[3];
-      step[1] = input[1] + input[2];
-      step[2] = input[1] - input[2];
-      step[3] = input[0] - input[3];
+      step[0] = in_high[0] + in_high[3];
+      step[1] = in_high[1] + in_high[2];
+      step[2] = in_high[1] - in_high[2];
+      step[3] = in_high[0] - in_high[3];
       temp1 = (step[0] + step[1]) * cospi_16_64;
       temp2 = (step[0] - step[1]) * cospi_16_64;
       out[0] = (tran_low_t)fdct_round_shift(temp1);
@@ -60,12 +62,11 @@ void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
     out[1] = (tran_low_t)fdct_round_shift(temp1);
     out[3] = (tran_low_t)fdct_round_shift(temp2);
     // Do next column (which is a transposed row in second/horizontal pass)
-    in_pass0++;