Commit 454280da authored by Urvang Joshi

Fix warnings reported by -Wshadow: Part2: av1 directory

While we are at it:
- Rename some variables to more meaningful names
- Reuse some common consts from a header instead of redefining them.

Cherry-picked from aomedia/master: 863b0499

Change-Id: Ida5de713156dc0126a27f90fdd36d29a398a3c88
parent 03f6fdcf
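
For context, the class of warning this patch fixes: -Wshadow fires whenever an inner declaration reuses the name of a parameter or an outer variable. A minimal sketch with invented names (not from this tree; compile with gcc -std=c99 -Wshadow -c shadow_demo.c):

/* shadow_demo.c: illustrates the warning and the rename-style fix
 * used throughout this commit. */
static int sum_scaled(int input) {
  int total = 0;
  int i;
  for (i = 0; i < 4; ++i) {
    int input = i * 2;  /* -Wshadow: declaration shadows the parameter */
    total += input;
  }
  return total;
}

static int sum_scaled_fixed(int input) {
  int total = input;  /* the parameter stays visible */
  int i;
  for (i = 0; i < 4; ++i) {
    const int scaled = i * 2;  /* renamed local: same code, no warning */
    total += scaled;
  }
  return total;
}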
@@ -580,7 +580,7 @@ static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
   // Initialize the decoder workers on the first frame.
   if (ctx->frame_workers == NULL) {
-    const aom_codec_err_t res = init_decoder(ctx);
+    res = init_decoder(ctx);
     if (res != AOM_CODEC_OK) return res;
   }
@@ -646,7 +646,6 @@ static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
     for (i = 0; i < frame_count; ++i) {
       const uint8_t *data_start_copy = data_start;
       const uint32_t frame_size = frame_sizes[i];
-      aom_codec_err_t res;
       if (data_start < data ||
           frame_size > (uint32_t)(data_end - data_start)) {
         set_error_detail(ctx, "Invalid frame size in index");
@@ -662,8 +661,7 @@ static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
   } else {
     while (data_start < data_end) {
       const uint32_t frame_size = (uint32_t)(data_end - data_start);
-      const aom_codec_err_t res =
-          decode_one(ctx, &data_start, frame_size, user_priv, deadline);
+      res = decode_one(ctx, &data_start, frame_size, user_priv, deadline);
       if (res != AOM_CODEC_OK) return res;
       // Account for suboptimal termination by the encoder.
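
The three hunks above all follow the same recipe: keep a single res in the enclosing scope and have each block assign to it, rather than declaring a fresh const aom_codec_err_t res that shadows it. A minimal sketch of that recipe, with stand-in types and helpers (not the real libaom API):

typedef int err_t;
#define DEMO_OK 0

static err_t init_step(void) { return DEMO_OK; }
static err_t decode_step(void) { return DEMO_OK; }

static err_t run(int need_init, int frames) {
  err_t res = DEMO_OK;  /* single declaration in the outer scope */
  int i;
  if (need_init) {
    res = init_step();  /* was: const err_t res = init_step(); */
    if (res != DEMO_OK) return res;
  }
  for (i = 0; i < frames; ++i) {
    res = decode_step();  /* assigns instead of redeclaring */
    if (res != DEMO_OK) return res;
  }
  return res;
}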
@@ -12,18 +12,11 @@
 #include <arm_neon.h>
 #include <assert.h>

-#include "./av1_rtcd.h"
 #include "./aom_config.h"
+#include "./av1_rtcd.h"
+#include "aom_dsp/txfm_common.h"
 #include "av1/common/common.h"

-static int16_t sinpi_1_9 = 0x14a3;
-static int16_t sinpi_2_9 = 0x26c9;
-static int16_t sinpi_3_9 = 0x3441;
-static int16_t sinpi_4_9 = 0x3b6c;
-static int16_t cospi_8_64 = 0x3b21;
-static int16_t cospi_16_64 = 0x2d41;
-static int16_t cospi_24_64 = 0x187e;
-
 static INLINE void TRANSPOSE4X4(int16x8_t *q8s16, int16x8_t *q9s16) {
   int32x4_t q8s32, q9s32;
   int16x4x2_t d0x2s16, d1x2s16;
@@ -43,18 +36,18 @@ static INLINE void TRANSPOSE4X4(int16x8_t *q8s16, int16x8_t *q9s16) {
 static INLINE void GENERATE_COSINE_CONSTANTS(int16x4_t *d0s16, int16x4_t *d1s16,
                                              int16x4_t *d2s16) {
-  *d0s16 = vdup_n_s16(cospi_8_64);
-  *d1s16 = vdup_n_s16(cospi_16_64);
-  *d2s16 = vdup_n_s16(cospi_24_64);
+  *d0s16 = vdup_n_s16((int16_t)cospi_8_64);
+  *d1s16 = vdup_n_s16((int16_t)cospi_16_64);
+  *d2s16 = vdup_n_s16((int16_t)cospi_24_64);
   return;
 }

 static INLINE void GENERATE_SINE_CONSTANTS(int16x4_t *d3s16, int16x4_t *d4s16,
                                            int16x4_t *d5s16, int16x8_t *q3s16) {
-  *d3s16 = vdup_n_s16(sinpi_1_9);
-  *d4s16 = vdup_n_s16(sinpi_2_9);
-  *q3s16 = vdupq_n_s16(sinpi_3_9);
-  *d5s16 = vdup_n_s16(sinpi_4_9);
+  *d3s16 = vdup_n_s16((int16_t)sinpi_1_9);
+  *d4s16 = vdup_n_s16((int16_t)sinpi_2_9);
+  *q3s16 = vdupq_n_s16((int16_t)sinpi_3_9);
+  *d5s16 = vdup_n_s16((int16_t)sinpi_4_9);
   return;
 }
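
The new (int16_t) casts go hand in hand with dropping the file-local int16_t constants: the shared cospi_*/sinpi_* values now come from aom_dsp/txfm_common.h, where they are declared with a wider integral type, so passing them to the s16 intrinsics needs an explicit narrowing. A standalone illustration (builds for an ARM/NEON target only; the constant is a stand-in):

#include <arm_neon.h>
#include <stdint.h>

static const int32_t kCospi8 = 15137;  /* fits in 16 bits, declared wider */

int16x4_t dup_cospi8(void) {
  /* vdup_n_s16() takes an int16_t lane value; the cast makes the
   * intentional truncation explicit and keeps narrowing warnings quiet. */
  return vdup_n_s16((int16_t)kCospi8);
}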
@@ -121,7 +114,7 @@ static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16,
   q10s32 = vaddq_s32(q10s32, q13s32);
   q10s32 = vaddq_s32(q10s32, q8s32);
   q11s32 = vsubq_s32(q11s32, q14s32);
-  q8s32 = vdupq_n_s32(sinpi_3_9);
+  q8s32 = vdupq_n_s32((int32_t)sinpi_3_9);
   q11s32 = vsubq_s32(q11s32, q9s32);
   q15s32 = vmulq_s32(q15s32, q8s32);
@@ -12,26 +12,11 @@
 #include <arm_neon.h>
 #include <assert.h>

-#include "./av1_rtcd.h"
 #include "./aom_config.h"
+#include "./av1_rtcd.h"
+#include "aom_dsp/txfm_common.h"
 #include "av1/common/common.h"

-static int16_t cospi_2_64 = 16305;
-static int16_t cospi_4_64 = 16069;
-static int16_t cospi_6_64 = 15679;
-static int16_t cospi_8_64 = 15137;
-static int16_t cospi_10_64 = 14449;
-static int16_t cospi_12_64 = 13623;
-static int16_t cospi_14_64 = 12665;
-static int16_t cospi_16_64 = 11585;
-static int16_t cospi_18_64 = 10394;
-static int16_t cospi_20_64 = 9102;
-static int16_t cospi_22_64 = 7723;
-static int16_t cospi_24_64 = 6270;
-static int16_t cospi_26_64 = 4756;
-static int16_t cospi_28_64 = 3196;
-static int16_t cospi_30_64 = 1606;
-
 static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16,
                                 int16x8_t *q10s16, int16x8_t *q11s16,
                                 int16x8_t *q12s16, int16x8_t *q13s16,
@@ -108,10 +93,10 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   int32x4_t q2s32, q3s32, q5s32, q6s32, q8s32, q9s32;
   int32x4_t q10s32, q11s32, q12s32, q13s32, q15s32;

-  d0s16 = vdup_n_s16(cospi_28_64);
-  d1s16 = vdup_n_s16(cospi_4_64);
-  d2s16 = vdup_n_s16(cospi_12_64);
-  d3s16 = vdup_n_s16(cospi_20_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_28_64);
+  d1s16 = vdup_n_s16((int16_t)cospi_4_64);
+  d2s16 = vdup_n_s16((int16_t)cospi_12_64);
+  d3s16 = vdup_n_s16((int16_t)cospi_20_64);

   d16s16 = vget_low_s16(*q8s16);
   d17s16 = vget_high_s16(*q8s16);
@@ -164,7 +149,7 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   q6s16 = vcombine_s16(d12s16, d13s16);
   q7s16 = vcombine_s16(d14s16, d15s16);

-  d0s16 = vdup_n_s16(cospi_16_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_16_64);

   q2s32 = vmull_s16(d16s16, d0s16);
   q3s32 = vmull_s16(d17s16, d0s16);
@@ -176,8 +161,8 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   q13s32 = vmlsl_s16(q13s32, d24s16, d0s16);
   q15s32 = vmlsl_s16(q15s32, d25s16, d0s16);

-  d0s16 = vdup_n_s16(cospi_24_64);
-  d1s16 = vdup_n_s16(cospi_8_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_24_64);
+  d1s16 = vdup_n_s16((int16_t)cospi_8_64);

   d18s16 = vqrshrn_n_s32(q2s32, 14);
   d19s16 = vqrshrn_n_s32(q3s32, 14);
@@ -217,7 +202,7 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   d28s16 = vget_low_s16(*q14s16);
   d29s16 = vget_high_s16(*q14s16);

-  d16s16 = vdup_n_s16(cospi_16_64);
+  d16s16 = vdup_n_s16((int16_t)cospi_16_64);

   q9s32 = vmull_s16(d28s16, d16s16);
   q10s32 = vmull_s16(d29s16, d16s16);
@@ -276,16 +261,16 @@ static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   d30s16 = vget_low_s16(*q15s16);
   d31s16 = vget_high_s16(*q15s16);

-  d14s16 = vdup_n_s16(cospi_2_64);
-  d15s16 = vdup_n_s16(cospi_30_64);
+  d14s16 = vdup_n_s16((int16_t)cospi_2_64);
+  d15s16 = vdup_n_s16((int16_t)cospi_30_64);

   q1s32 = vmull_s16(d30s16, d14s16);
   q2s32 = vmull_s16(d31s16, d14s16);
   q3s32 = vmull_s16(d30s16, d15s16);
   q4s32 = vmull_s16(d31s16, d15s16);

-  d30s16 = vdup_n_s16(cospi_18_64);
-  d31s16 = vdup_n_s16(cospi_14_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_18_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_14_64);

   q1s32 = vmlal_s16(q1s32, d16s16, d15s16);
   q2s32 = vmlal_s16(q2s32, d17s16, d15s16);
@@ -324,15 +309,15 @@ static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   d7s16 = vqrshrn_n_s32(q4s32, 14);
   *q12s16 = vcombine_s16(d24s16, d25s16);

-  d0s16 = vdup_n_s16(cospi_10_64);
-  d1s16 = vdup_n_s16(cospi_22_64);
+  d0s16 = vdup_n_s16((int16_t)cospi_10_64);
+  d1s16 = vdup_n_s16((int16_t)cospi_22_64);

   q4s32 = vmull_s16(d26s16, d0s16);
   q5s32 = vmull_s16(d27s16, d0s16);
   q2s32 = vmull_s16(d26s16, d1s16);
   q6s32 = vmull_s16(d27s16, d1s16);

-  d30s16 = vdup_n_s16(cospi_26_64);
-  d31s16 = vdup_n_s16(cospi_6_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_26_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_6_64);

   q4s32 = vmlal_s16(q4s32, d20s16, d1s16);
   q5s32 = vmlal_s16(q5s32, d21s16, d1s16);
@@ -367,8 +352,8 @@ static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   q4s32 = vsubq_s32(q4s32, q0s32);
   q5s32 = vsubq_s32(q5s32, q13s32);

-  d30s16 = vdup_n_s16(cospi_8_64);
-  d31s16 = vdup_n_s16(cospi_24_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_8_64);
+  d31s16 = vdup_n_s16((int16_t)cospi_24_64);

   d18s16 = vqrshrn_n_s32(q9s32, 14);
   d19s16 = vqrshrn_n_s32(q10s32, 14);
@@ -423,7 +408,7 @@ static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   d15s16 = vqrshrn_n_s32(q0s32, 14);
   *q14s16 = vcombine_s16(d28s16, d29s16);

-  d30s16 = vdup_n_s16(cospi_16_64);
+  d30s16 = vdup_n_s16((int16_t)cospi_16_64);

   d22s16 = vget_low_s16(*q11s16);
   d23s16 = vget_high_s16(*q11s16);
@@ -9,8 +9,9 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */

-#include "./av1_rtcd.h"
 #include "av1/common/av1_fwd_txfm.h"
+#include <assert.h>
+#include "./av1_rtcd.h"

 void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
@@ -22,36 +23,37 @@ void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   int pass;
   // We need an intermediate buffer between passes.
   tran_low_t intermediate[4 * 4];
-  const int16_t *in_pass0 = input;
-  const tran_low_t *in = NULL;
+  const tran_low_t *in_low = NULL;
   tran_low_t *out = intermediate;
   // Do the two transform/transpose passes
   for (pass = 0; pass < 2; ++pass) {
-    tran_high_t input[4];      // canbe16
+    tran_high_t in_high[4];    // canbe16
     tran_high_t step[4];       // canbe16
     tran_high_t temp1, temp2;  // needs32
     int i;
     for (i = 0; i < 4; ++i) {
       // Load inputs.
       if (0 == pass) {
-        input[0] = in_pass0[0 * stride] * 16;
-        input[1] = in_pass0[1 * stride] * 16;
-        input[2] = in_pass0[2 * stride] * 16;
-        input[3] = in_pass0[3 * stride] * 16;
-        if (i == 0 && input[0]) {
-          input[0] += 1;
+        in_high[0] = input[0 * stride] * 16;
+        in_high[1] = input[1 * stride] * 16;
+        in_high[2] = input[2 * stride] * 16;
+        in_high[3] = input[3 * stride] * 16;
+        if (i == 0 && in_high[0]) {
+          in_high[0] += 1;
         }
       } else {
-        input[0] = in[0 * 4];
-        input[1] = in[1 * 4];
-        input[2] = in[2 * 4];
-        input[3] = in[3 * 4];
+        assert(in_low != NULL);
+        in_high[0] = in_low[0 * 4];
+        in_high[1] = in_low[1 * 4];
+        in_high[2] = in_low[2 * 4];
+        in_high[3] = in_low[3 * 4];
+        in_low++;
       }
       // Transform.
-      step[0] = input[0] + input[3];
-      step[1] = input[1] + input[2];
-      step[2] = input[1] - input[2];
-      step[3] = input[0] - input[3];
+      step[0] = in_high[0] + in_high[3];
+      step[1] = in_high[1] + in_high[2];
+      step[2] = in_high[1] - in_high[2];
+      step[3] = in_high[0] - in_high[3];
       temp1 = (step[0] + step[1]) * cospi_16_64;
       temp2 = (step[0] - step[1]) * cospi_16_64;
       out[0] = (tran_low_t)fdct_round_shift(temp1);
@@ -61,12 +63,11 @@ void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
       out[1] = (tran_low_t)fdct_round_shift(temp1);
       out[3] = (tran_low_t)fdct_round_shift(temp2);
       // Do next column (which is a transposed row in second/horizontal pass)
-      in_pass0++;
-      in++;
+      input++;
       out += 4;
     }
-    // Setup in/out for next pass.
-    in = intermediate;
+    // Setup in_low/out for next pass.
+    in_low = intermediate;
     out = output;
   }
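
The rename in av1_fdct4x4_c() (and in av1_fdct16x16_c() below) is the same shadowing fix at a larger scale: the per-pass scratch array used to be called input, hiding the function parameter, which in turn forced the in_pass0 copy of that parameter. With the scratch array renamed in_high and the second-pass pointer renamed in_low, the parameter can be walked directly. A reduced, hypothetical sketch of the structure:

void two_pass_like(const short *input, short *output, int stride) {
  short intermediate[4 * 4];
  const short *in_low = 0;  /* second-pass source; set after pass 0 */
  short *out = intermediate;
  int pass, i;
  for (pass = 0; pass < 2; ++pass) {
    int in_high[4];  /* was `int input[4];`, shadowing the parameter */
    for (i = 0; i < 4; ++i) {
      if (pass == 0) {
        in_high[0] = input[0 * stride];  /* parameter used directly */
        input++;                         /* advance to the next column */
      } else {
        in_high[0] = in_low[0];
        in_low++;
      }
      out[0] = (short)in_high[0];
      out += 4;
    }
    in_low = intermediate;  /* pass 1 reads what pass 0 wrote */
    out = output;
  }
}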
@@ -101,7 +102,6 @@ void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) {
     tran_high_t t0, t1, t2, t3;  // needs32
     tran_high_t x0, x1, x2, x3;  // canbe16
     int i;
-
     for (i = 0; i < 8; i++) {
       // stage 1
       if (pass == 0) {
@@ -193,56 +193,57 @@ void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
   int pass;
   // We need an intermediate buffer between passes.
   tran_low_t intermediate[256];
-  const int16_t *in_pass0 = input;
-  const tran_low_t *in = NULL;
+  const tran_low_t *in_low = NULL;
   tran_low_t *out = intermediate;
   // Do the two transform/transpose passes
   for (pass = 0; pass < 2; ++pass) {
     tran_high_t step1[8];      // canbe16
     tran_high_t step2[8];      // canbe16
     tran_high_t step3[8];      // canbe16
-    tran_high_t input[8];      // canbe16
+    tran_high_t in_high[8];    // canbe16
     tran_high_t temp1, temp2;  // needs32
     int i;
     for (i = 0; i < 16; i++) {
       if (0 == pass) {
         // Calculate input for the first 8 results.
-        input[0] = (in_pass0[0 * stride] + in_pass0[15 * stride]) * 4;
-        input[1] = (in_pass0[1 * stride] + in_pass0[14 * stride]) * 4;
-        input[2] = (in_pass0[2 * stride] + in_pass0[13 * stride]) * 4;
-        input[3] = (in_pass0[3 * stride] + in_pass0[12 * stride]) * 4;
-        input[4] = (in_pass0[4 * stride] + in_pass0[11 * stride]) * 4;
-        input[5] = (in_pass0[5 * stride] + in_pass0[10 * stride]) * 4;
-        input[6] = (in_pass0[6 * stride] + in_pass0[9 * stride]) * 4;
-        input[7] = (in_pass0[7 * stride] + in_pass0[8 * stride]) * 4;
+        in_high[0] = (input[0 * stride] + input[15 * stride]) * 4;
+        in_high[1] = (input[1 * stride] + input[14 * stride]) * 4;
+        in_high[2] = (input[2 * stride] + input[13 * stride]) * 4;
+        in_high[3] = (input[3 * stride] + input[12 * stride]) * 4;
+        in_high[4] = (input[4 * stride] + input[11 * stride]) * 4;
+        in_high[5] = (input[5 * stride] + input[10 * stride]) * 4;
+        in_high[6] = (input[6 * stride] + input[9 * stride]) * 4;
+        in_high[7] = (input[7 * stride] + input[8 * stride]) * 4;
         // Calculate input for the next 8 results.
-        step1[0] = (in_pass0[7 * stride] - in_pass0[8 * stride]) * 4;
-        step1[1] = (in_pass0[6 * stride] - in_pass0[9 * stride]) * 4;
-        step1[2] = (in_pass0[5 * stride] - in_pass0[10 * stride]) * 4;
-        step1[3] = (in_pass0[4 * stride] - in_pass0[11 * stride]) * 4;
-        step1[4] = (in_pass0[3 * stride] - in_pass0[12 * stride]) * 4;
-        step1[5] = (in_pass0[2 * stride] - in_pass0[13 * stride]) * 4;
-        step1[6] = (in_pass0[1 * stride] - in_pass0[14 * stride]) * 4;
-        step1[7] = (in_pass0[0 * stride] - in_pass0[15 * stride]) * 4;
+        step1[0] = (input[7 * stride] - input[8 * stride]) * 4;
+        step1[1] = (input[6 * stride] - input[9 * stride]) * 4;
+        step1[2] = (input[5 * stride] - input[10 * stride]) * 4;
+        step1[3] = (input[4 * stride] - input[11 * stride]) * 4;
+        step1[4] = (input[3 * stride] - input[12 * stride]) * 4;
+        step1[5] = (input[2 * stride] - input[13 * stride]) * 4;
+        step1[6] = (input[1 * stride] - input[14 * stride]) * 4;
+        step1[7] = (input[0 * stride] - input[15 * stride]) * 4;
       } else {
         // Calculate input for the first 8 results.
-        input[0] = ((in[0 * 16] + 1) >> 2) + ((in[15 * 16] + 1) >> 2);
-        input[1] = ((in[1 * 16] + 1) >> 2) + ((in[14 * 16] + 1) >> 2);
-        input[2] = ((in[2 * 16] + 1) >> 2) + ((in[13 * 16] + 1) >> 2);
-        input[3] = ((in[3 * 16] + 1) >> 2) + ((in[12 * 16] + 1) >> 2);
-        input[4] = ((in[4 * 16] + 1) >> 2) + ((in[11 * 16] + 1) >> 2);
-        input[5] = ((in[5 * 16] + 1) >> 2) + ((in[10 * 16] + 1) >> 2);
-        input[6] = ((in[6 * 16] + 1) >> 2) + ((in[9 * 16] + 1) >> 2);
-        input[7] = ((in[7 * 16] + 1) >> 2) + ((in[8 * 16] + 1) >> 2);
+        assert(in_low != NULL);
+        in_high[0] = ((in_low[0 * 16] + 1) >> 2) + ((in_low[15 * 16] + 1) >> 2);
+        in_high[1] = ((in_low[1 * 16] + 1) >> 2) + ((in_low[14 * 16] + 1) >> 2);
+        in_high[2] = ((in_low[2 * 16] + 1) >> 2) + ((in_low[13 * 16] + 1) >> 2);
+        in_high[3] = ((in_low[3 * 16] + 1) >> 2) + ((in_low[12 * 16] + 1) >> 2);
+        in_high[4] = ((in_low[4 * 16] + 1) >> 2) + ((in_low[11 * 16] + 1) >> 2);
+        in_high[5] = ((in_low[5 * 16] + 1) >> 2) + ((in_low[10 * 16] + 1) >> 2);
+        in_high[6] = ((in_low[6 * 16] + 1) >> 2) + ((in_low[9 * 16] + 1) >> 2);
+        in_high[7] = ((in_low[7 * 16] + 1) >> 2) + ((in_low[8 * 16] + 1) >> 2);
         // Calculate input for the next 8 results.
-        step1[0] = ((in[7 * 16] + 1) >> 2) - ((in[8 * 16] + 1) >> 2);
-        step1[1] = ((in[6 * 16] + 1) >> 2) - ((in[9 * 16] + 1) >> 2);
-        step1[2] = ((in[5 * 16] + 1) >> 2) - ((in[10 * 16] + 1) >> 2);
-        step1[3] = ((in[4 * 16] + 1) >> 2) - ((in[11 * 16] + 1) >> 2);
-        step1[4] = ((in[3 * 16] + 1) >> 2) - ((in[12 * 16] + 1) >> 2);
-        step1[5] = ((in[2 * 16] + 1) >> 2) - ((in[13 * 16] + 1) >> 2);
-        step1[6] = ((in[1 * 16] + 1) >> 2) - ((in[14 * 16] + 1) >> 2);
-        step1[7] = ((in[0 * 16] + 1) >> 2) - ((in[15 * 16] + 1) >> 2);
+        step1[0] = ((in_low[7 * 16] + 1) >> 2) - ((in_low[8 * 16] + 1) >> 2);
+        step1[1] = ((in_low[6 * 16] + 1) >> 2) - ((in_low[9 * 16] + 1) >> 2);
+        step1[2] = ((in_low[5 * 16] + 1) >> 2) - ((in_low[10 * 16] + 1) >> 2);
+        step1[3] = ((in_low[4 * 16] + 1) >> 2) - ((in_low[11 * 16] + 1) >> 2);
+        step1[4] = ((in_low[3 * 16] + 1) >> 2) - ((in_low[12 * 16] + 1) >> 2);
+        step1[5] = ((in_low[2 * 16] + 1) >> 2) - ((in_low[13 * 16] + 1) >> 2);
+        step1[6] = ((in_low[1 * 16] + 1) >> 2) - ((in_low[14 * 16] + 1) >> 2);
+        step1[7] = ((in_low[0 * 16] + 1) >> 2) - ((in_low[15 * 16] + 1) >> 2);
+        in_low++;
       }
       // Work on the first eight values; fdct8(input, even_results);
       {
@@ -251,14 +252,14 @@ void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
       tran_high_t x0, x1, x2, x3;  // canbe16

       // stage 1
-      s0 = input[0] + input[7];
-      s1 = input[1] + input[6];
-      s2 = input[2] + input[5];
-      s3 = input[3] + input[4];
-      s4 = input[3] - input[4];
-      s5 = input[2] - input[5];
-      s6 = input[1] - input[6];
-      s7 = input[0] - input[7];
+      s0 = in_high[0] + in_high[7];
+      s1 = in_high[1] + in_high[6];
+      s2 = in_high[2] + in_high[5];
+      s3 = in_high[3] + in_high[4];
+      s4 = in_high[3] - in_high[4];
+      s5 = in_high[2] - in_high[5];
+      s6 = in_high[1] - in_high[6];
+      s7 = in_high[0] - in_high[7];

       // fdct4(step, step);
       x0 = s0 + s3;
@@ -353,12 +354,11 @@ void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
         out[15] = (tran_low_t)fdct_round_shift(temp2);
       }
       // Do next column (which is a transposed row in second/horizontal pass)
-      in++;
-      in_pass0++;
+      input++;
       out += 16;
     }
     // Setup in/out for next pass.
-    in = intermediate;
+    in_low = intermediate;
     out = output;
   }
 }
@@ -889,42 +889,44 @@ void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
       break;
     default:
       for (idx_32 = 0; idx_32 < 4; mip += offset_32[idx_32], ++idx_32) {
-        const int shift_y = shift_32_y[idx_32];
-        const int shift_uv = shift_32_uv[idx_32];
+        const int shift_y_32 = shift_32_y[idx_32];
+        const int shift_uv_32 = shift_32_uv[idx_32];
         const int mi_32_col_offset = ((idx_32 & 1) << 2);
         const int mi_32_row_offset = ((idx_32 >> 1) << 2);
         if (mi_32_col_offset >= max_cols || mi_32_row_offset >= max_rows)
           continue;
         switch (mip[0]->mbmi.sb_type) {
           case BLOCK_32X32:
-            build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+            build_masks(lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
             break;
-          case BLOCK_32X16: build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+          case BLOCK_32X16:
+            build_masks(lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
 #if CONFIG_SUPERTX
             if (supertx_enabled(&mip[0]->mbmi)) break;
 #endif
             if (mi_32_row_offset + 2 >= max_rows) continue;
             mip2 = mip + mode_info_stride * 2;
-            build_masks(lfi_n, mip2[0], shift_y + 16, shift_uv + 4, lfm);
+            build_masks(lfi_n, mip2[0], shift_y_32 + 16, shift_uv_32 + 4, lfm);
             break;
-          case BLOCK_16X32: build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+          case BLOCK_16X32:
+            build_masks(lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
 #if CONFIG_SUPERTX
             if (supertx_enabled(&mip[0]->mbmi)) break;
 #endif
             if (mi_32_col_offset + 2 >= max_cols) continue;
             mip2 = mip + 2;
-            build_masks(lfi_n, mip2[0], shift_y + 2, shift_uv + 1, lfm);
+            build_masks(lfi_n, mip2[0], shift_y_32 + 2, shift_uv_32 + 1, lfm);
             break;
           default:
 #if CONFIG_SUPERTX
             if (mip[0]->mbmi.tx_size == TX_32X32) {
-              build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+              build_masks(lfi_n, mip[0], shift_y_32, shift_uv_32, lfm);
               break;
             }
 #endif
             for (idx_16 = 0; idx_16 < 4; mip += offset_16[idx_16], ++idx_16) {
-              const int shift_y = shift_32_y[idx_32] + shift_16_y[idx_16];
-              const int shift_uv = shift_32_uv[idx_32] + shift_16_uv[idx_16];
+              const int shift_y_32_16 = shift_y_32 + shift_16_y[idx_16];
+              const int shift_uv_32_16 = shift_uv_32 + shift_16_uv[idx_16];
               const int mi_16_col_offset =
                   mi_32_col_offset + ((idx_16 & 1) << 1);
               const int mi_16_row_offset =
@@ -935,16 +937,18 @@ void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
               switch (mip[0]->mbmi.sb_type) {
                 case BLOCK_16X16:
-                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
+                              lfm);
                   break;
                 case BLOCK_16X8:
 #if CONFIG_SUPERTX
                   if (supertx_enabled(&mip[0]->mbmi)) break;
 #endif
-                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
+                              lfm);
                   if (mi_16_row_offset + 1 >= max_rows) continue;
                   mip2 = mip + mode_info_stride;
-                  build_y_mask(lfi_n, mip2[0], shift_y + 8,
+                  build_y_mask(lfi_n, mip2[0], shift_y_32_16 + 8,
 #if CONFIG_SUPERTX
                                0,
 #endif
@@ -954,29 +958,31 @@ void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
 #if CONFIG_SUPERTX
                   if (supertx_enabled(&mip[0]->mbmi)) break;
 #endif
-                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y_32_16, shift_uv_32_16,
+                              lfm);
                   if (mi_16_col_offset + 1 >= max_cols) continue;
                   mip2 = mip + 1;
-                  build_y_mask(lfi_n, mip2[0], shift_y + 1,
+                  build_y_mask(lfi_n, mip2[0], shift_y_32_16 + 1,
 #if CONFIG_SUPERTX
                                0,
 #endif
                                lfm);
                   break;
                 default: {
-                  const int shift_y =
-                      shift_32_y[idx_32] + shift_16_y[idx_16] + shift_8_y[0];
+                  const int shift_y_32_16_8_zero = shift_y_32_16 + shift_8_y[0];
 #if CONFIG_SUPERTX
                   if (mip[0]->mbmi.tx_size == TX_16X16) {
-                    build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+                    build_masks(lfi_n, mip[0], shift_y_32_16_8_zero,
+                                shift_uv_32_16, lfm);
                     break;
                   }
 #endif
-                  build_masks(lfi_n, mip[0], shift_y, shift_uv, lfm);
+                  build_masks(lfi_n, mip[0], shift_y_32_16_8_zero,
+                              shift_uv_32_16, lfm);
                   mip += offset[0];
                   for (idx_8 = 1; idx_8 < 4; mip += offset[idx_8], ++idx_8) {
-                    const int shift_y = shift_32_y[idx_32] +
-                                        shift_16_y[idx_16] + shift_8_y[idx_8];
+                    const int shift_y_32_16_8 =
+                        shift_y_32_16 + shift_8_y[idx_8];
                     const int mi_8_col_offset =
                         mi_16_col_offset + ((idx_8 & 1));
                     const int mi_8_row_offset =
@@ -985,7 +991,7 @@ void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
                     if (mi_8_col_offset >= max_cols ||
                         mi_8_row_offset >= max_rows)
                       continue;
-                    build_y_mask(lfi_n, mip[0], shift_y,
+                    build_y_mask(lfi_n, mip[0], shift_y_32_16_8,
 #if CONFIG_SUPERTX
                                  supertx_enabled(&mip[0]->mbmi),
 #endif
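
The av1_setup_mask() rename scheme above, in miniature: three nested loops each used to declare their own const int shift_y, so every inner declaration shadowed the outer one. Encoding the nesting level in the name (shift_y_32, then shift_y_32_16, then shift_y_32_16_8) removes the shadowing and lets each level build on the one outside it. A self-contained sketch with made-up values:

static const int shift_32_y_demo[4] = { 0, 4, 32, 36 };
static const int shift_16_y_demo[4] = { 0, 2, 16, 18 };
static const int shift_8_y_demo[4] = { 0, 1, 8, 9 };

int total_shift_demo(void) {
  int acc = 0;
  int idx_32, idx_16, idx_8;
  for (idx_32 = 0; idx_32 < 4; ++idx_32) {
    const int shift_y_32 = shift_32_y_demo[idx_32];  /* was shift_y */
    for (idx_16 = 0; idx_16 < 4; ++idx_16) {
      const int shift_y_32_16 = shift_y_32 + shift_16_y_demo[idx_16];
      for (idx_8 = 0; idx_8 < 4; ++idx_8) {
        const int shift_y_32_16_8 = shift_y_32_16 + shift_8_y_demo[idx_8];
        acc += shift_y_32_16_8;  /* no ambiguity about which shift applies */
      }
    }
  }
  return acc;
}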
@@ -2388,7 +2388,6 @@ void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
 #define IDCT32_34                                                  \
   /* Stage1 */                                                     \
   {                                                                \
-    const __m128i zero = _mm_setzero_si128();                      \
     const __m128i lo_1_31 = _mm_unpacklo_epi16(in[1], zero);       \
     const __m128i hi_1_31 = _mm_unpackhi_epi16(in[1], zero);       \
                                                                    \
@@ -2413,7 +2412,6 @@ void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
                                                                    \
   /* Stage2 */                                                     \
   {                                                                \
-    const __m128i zero = _mm_setzero_si128();                      \
     const __m128i lo_2_30 = _mm_unpacklo_epi16(in[2], zero);       \
     const __m128i hi_2_30 = _mm_unpackhi_epi16(in[2], zero);       \
                                                                    \
@@ -2440,7 +2438,6 @@ void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
                                                                    \
   /* Stage3 */                                                     \
   {                                                                \
-    const __m128i zero = _mm_setzero_si128();                      \
     const __m128i lo_4_28 = _mm_unpacklo_epi16(in[4], zero);       \
     const __m128i hi_4_28 = _mm_unpackhi_epi16(in[4], zero);       \
                                                                    \
@@ -2481,7 +2478,6 @@ void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
                                                                    \
   /* Stage4 */                                                     \
   {                                                                \
-    const __m128i zero = _mm_setzero_si128();                      \
     const __m128i lo_0_16 = _mm_unpacklo_epi16(in[0], zero);       \
     const __m128i hi_0_16 = _mm_unpackhi_epi16(in[0], zero);       \
                                                                    \
@@ -3018,6 +3014,7 @@ void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
 // Only upper-left 8x8 has non-zero coeff
 void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
                                int stride) {
+  const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3123,7 +3120,6 @@ void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
   col[31] = _mm_sub_epi16(stp1_0, stp1_31);
   for (i = 0; i < 4; i++) {
     int j;
-    const __m128i zero = _mm_setzero_si128();
     // Transpose 32x8 block to 8x32 block
     array_transpose_8x8(col + i * 8, in);
     IDCT32_34
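
The SSE2 change, reduced: each stage block of the IDCT32_34 macro declared its own const __m128i zero, and because the macro expands several such blocks into one enclosing function scope that also wants a zero, -Wshadow fired on every redeclaration. Declaring zero once in the enclosing function fixes it; the macro simply picks up the name from its expansion context. A hypothetical reduction (x86 target):

#include <emmintrin.h>

/* Relies on a variable named `zero` being visible where it is expanded. */
#define STAGE_LIKE(dst, src)                  \
  {                                           \
    (dst) = _mm_unpacklo_epi16((src), zero);  \
  }

void expand_stages(__m128i *out, const __m128i *in) {
  const __m128i zero = _mm_setzero_si128();  /* hoisted, declared once */
  STAGE_LIKE(out[0], in[0])
  STAGE_LIKE(out[1], in[1])
}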
@@ -3465,16 +3465,13 @@ static size_t read_uncompressed_header(AV1Decoder *pbi,
   setup_segmentation(cm, rb);

-  {
-    int i;
-    for (i = 0; i < MAX_SEGMENTS; ++i) {
-      const int qindex = cm->seg.enabled
-                             ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
-                             : cm->base_qindex;
-      xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
-                        cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
-      xd->qindex[i] = qindex;
-    }
+  for (i = 0; i < MAX_SEGMENTS; ++i) {
+    const int qindex = cm->seg.enabled
+                           ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
+                           : cm->base_qindex;