Commit c456b35f authored by Ronald S. Bultje's avatar Ronald S. Bultje

32x32 transform for superblocks.

This adds Debargha's DCT/DWT hybrid and a regular 32x32 DCT, and adds
code all over the place to wrap that in the bitstream/encoder/decoder/RD.

Some implementation notes (these probably need careful review):
- token range is extended by 1 bit, since the value range out of this
  transform is [-16384,16383].
- the coefficients coming out of the FDCT are manually scaled back by
  1 bit, or else they won't fit in int16_t (they are 17 bits). Because
  of this, the RD error scoring does not right-shift the MSE score by
  two (unlike for 4x4/8x8/16x16).
- to compensate for this loss in precision, the quantizer is halved
  also. This is currently a little hacky.
- FDCT and IDCT is double-only right now. Needs a fixed-point impl.
- There are no default probabilities for the 32x32 transform yet; I'm
  simply using the 16x16 luma ones. A future commit will add newly
  generated probabilities for all transforms.
- No ADST version. I don't think we'll add one for this level; if an
  ADST is desired, transform-size selection can scale back to 16x16
  or lower, and use an ADST at that level.

Additional notes specific to Debargha's DWT/DCT hybrid:
- coefficient scale is different for the top/left 16x16 (DCT-over-DWT)
  block than for the rest (DWT pixel differences) of the block. Therefore,
  RD error scoring isn't easily scalable between coefficient and pixel
  domain. Thus, unfortunately, we need to compute the RD distortion in
  the pixel domain until we figure out how to scale these appropriately.

Change-Id: I00386f20f35d7fabb19aba94c8162f8aee64ef2b
parent a36d9a4a
......@@ -247,6 +247,8 @@ EXPERIMENT_LIST="
implicit_segmentation
newbintramodes
comp_interintra_pred
tx32x32
dwt32x32hybrid
"
CONFIG_LIST="
external_build
......
/*
* Copyright (c) 2012 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
extern "C" {
#include "vp9/common/vp9_entropy.h"
#include "./vp9_rtcd.h"
void vp9_short_fdct32x32_c(int16_t *input, int16_t *out, int pitch);
void vp9_short_idct32x32_c(short *input, short *output, int pitch);
}
#include "test/acm_random.h"
#include "vpx/vpx_integer.h"
using libvpx_test::ACMRandom;
namespace {
#if !CONFIG_DWT32X32HYBRID
static const double kPi = 3.141592653589793238462643383279502884;
// Reference inverse 32x32 DCT, computed straight from the 2-D separable
// definition (quadruple loop, double precision). The input is expected at
// the forward transform's output scale, hence the /1024 de-scaling of each
// coefficient and the final /4 normalization.
static void reference2_32x32_idct_2d(double *input, double *output) {
  const double pi = 3.141592653589793238462643383279502884;
  for (int row = 0; row < 32; ++row) {
    for (int col = 0; col < 32; ++col) {
      double sum = 0.0;
      for (int v = 0; v < 32; ++v) {
        for (int u = 0; u < 32; ++u) {
          // Basis term for coefficient (v, u) at pixel (col, row).
          double term = cos(pi * u * (row + 0.5) / 32.0) *
                        cos(pi * v * (col + 0.5) / 32.0) *
                        input[v * 32 + u] / 1024;
          // Non-DC basis functions carry a sqrt(2) weight per dimension.
          if (v != 0)
            term *= sqrt(2.0);
          if (u != 0)
            term *= sqrt(2.0);
          sum += term;
        }
      }
      output[col * 32 + row] = sum / 4;
    }
  }
}
// Reference 1-D 32-point forward DCT (type II) in double precision.
// The DC output is scaled by 1/sqrt(2); `stride` is kept for interface
// parity with other reference transforms but is not used.
static void reference_32x32_dct_1d(double in[32], double out[32], int stride) {
  const double pi = 3.141592653589793238462643383279502884;
  const double inv_sqrt2 = 0.707106781186547524400844362104;
  (void)stride;
  for (int k = 0; k < 32; k++) {
    double acc = 0.0;
    for (int n = 0; n < 32; n++)
      acc += in[n] * cos(pi * (2 * n + 1) * k / 64.0);
    out[k] = (k == 0) ? acc * inv_sqrt2 : acc;
  }
}
// Reference forward 32x32 DCT: a column pass followed by a row pass, each
// delegating to the 1-D reference transform. The row pass divides by 4 to
// match the scaling convention of the implementation under test.
static void reference_32x32_dct_2d(int16_t input[32 * 32],
                                   double output[32 * 32]) {
  double pass_in[32], pass_out[32];
  // Column transform first.
  for (int col = 0; col < 32; ++col) {
    for (int row = 0; row < 32; ++row)
      pass_in[row] = input[row * 32 + col];
    reference_32x32_dct_1d(pass_in, pass_out, 1);
    for (int row = 0; row < 32; ++row)
      output[row * 32 + col] = pass_out[row];
  }
  // Then the rows, scaling the result down by 4.
  for (int row = 0; row < 32; ++row) {
    for (int col = 0; col < 32; ++col)
      pass_in[col] = output[row * 32 + col];
    reference_32x32_dct_1d(pass_in, pass_out, 1);
    for (int col = 0; col < 32; ++col)
      output[row * 32 + col] = pass_out[col] / 4;
  }
}
// Round-trips random [-255, 255] blocks through the double-precision
// reference FDCT and the C IDCT under test, requiring per-sample error
// <= 1 for the reconstruction, and then checks the C FDCT against the
// rounded reference coefficients to the same tolerance.
TEST(VP9Idct32x32Test, AccuracyCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  const int count_test_block = 1000;
  for (int i = 0; i < count_test_block; ++i) {
    int16_t in[1024], coeff[1024];
    int16_t out_c[1024];
    double out_r[1024];

    // Initialize a test block with input range [-255, 255].
    for (int j = 0; j < 1024; ++j)
      in[j] = rnd.Rand8() - rnd.Rand8();

    reference_32x32_dct_2d(in, out_r);
    for (int j = 0; j < 1024; j++)
      coeff[j] = round(out_r[j]);
    vp9_short_idct32x32_c(coeff, out_c, 64);
    for (int j = 0; j < 1024; ++j) {
      const int diff = out_c[j] - in[j];
      const int error = diff * diff;
      // Fixed the message: this is the 32x32 IDCT, not "3x32".
      EXPECT_GE(1, error)
          << "Error: 32x32 IDCT has error " << error
          << " at index " << j;
    }
    vp9_short_fdct32x32_c(in, out_c, 64);
    for (int j = 0; j < 1024; ++j) {
      const double diff = coeff[j] - out_c[j];
      const double error = diff * diff;
      EXPECT_GE(1.0, error)
          << "Error: 32x32 FDCT has error " << error
          << " at index " << j;
    }
  }
}
#else // CONFIG_DWT32X32HYBRID
// TODO(rbultje/debargha): add DWT-specific tests
#endif // CONFIG_DWT32X32HYBRID
// Round-trips random [-255, 255] blocks through the C FDCT and IDCT under
// test and bounds both the worst-case per-sample error (<= 1) and the
// average error per block (< 1/10).
TEST(VP9Fdct32x32Test, AccuracyCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  unsigned int max_error = 0;
  int64_t total_error = 0;
  const int count_test_block = 1000;
  for (int i = 0; i < count_test_block; ++i) {
    int16_t test_input_block[1024];
    int16_t test_temp_block[1024];
    int16_t test_output_block[1024];

    // Initialize a test block with input range [-255, 255].
    for (int j = 0; j < 1024; ++j)
      test_input_block[j] = rnd.Rand8() - rnd.Rand8();

    const int pitch = 64;
    vp9_short_fdct32x32_c(test_input_block, test_temp_block, pitch);
    vp9_short_idct32x32_c(test_temp_block, test_output_block, pitch);

    for (int j = 0; j < 1024; ++j) {
      // Compute the difference in signed arithmetic; the previous unsigned
      // subtraction only produced the right square via modular wraparound.
      const int diff = test_input_block[j] - test_output_block[j];
      const unsigned int error = diff * diff;
      if (max_error < error)
        max_error = error;
      total_error += error;
    }
  }

  EXPECT_GE(1u, max_error)
      << "Error: 32x32 FDCT/IDCT has an individual roundtrip error > 1";

  EXPECT_GE(count_test_block/10, total_error)
      << "Error: 32x32 FDCT/IDCT has average roundtrip error > 1/10 per block";
}
// Checks that FDCT coefficients stay within 4*DCT_MAX_VALUE for both random
// and extreme (+/-255, and all-255 on the first iteration) inputs, so the
// minimum quantizer of 4 cannot overflow the token range.
TEST(VP9Fdct32x32Test, CoeffSizeCheck) {
  ACMRandom rnd(ACMRandom::DeterministicSeed());
  const int count_test_block = 1000;
  for (int i = 0; i < count_test_block; ++i) {
    int16_t input_block[1024], input_extreme_block[1024];
    int16_t output_block[1024], output_extreme_block[1024];

    // Initialize a test block with input range [-255, 255].
    for (int j = 0; j < 1024; ++j) {
      input_block[j] = rnd.Rand8() - rnd.Rand8();
      input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
    }
    if (i == 0)
      for (int j = 0; j < 1024; ++j)
        input_extreme_block[j] = 255;

    // Pitch is in bytes (2 * width), matching the other vp9_short_fdct32x32_c
    // calls in this file; the previous value of 32 passed the wrong stride.
    const int pitch = 64;
    vp9_short_fdct32x32_c(input_block, output_block, pitch);
    vp9_short_fdct32x32_c(input_extreme_block, output_extreme_block, pitch);

    // The minimum quant value is 4.
    for (int j = 0; j < 1024; ++j) {
      EXPECT_GE(4*DCT_MAX_VALUE, abs(output_block[j]))
          << "Error: 32x32 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
      EXPECT_GE(4*DCT_MAX_VALUE, abs(output_extreme_block[j]))
          << "Error: 32x32 FDCT extreme has coefficient larger than "
             "4*DCT_MAX_VALUE";
    }
  }
}
} // namespace
......@@ -64,6 +64,9 @@ endif
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct4x4_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
#LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += dct16x16_test.cc
ifeq ($(CONFIG_VP9_ENCODER)$(CONFIG_TX32X32),yesyes)
LIBVPX_TEST_SRCS-yes += dct32x32_test.cc
endif
LIBVPX_TEST_SRCS-yes += idct8x8_test.cc
LIBVPX_TEST_SRCS-yes += variance_test.cc
endif # VP9
......
......@@ -129,7 +129,13 @@ typedef enum {
TX_4X4, // 4x4 dct transform
TX_8X8, // 8x8 dct transform
TX_16X16, // 16x16 dct transform
TX_SIZE_MAX // Number of different transforms available
TX_SIZE_MAX_MB, // Number of transforms available to MBs
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
TX_32X32 = TX_SIZE_MAX_MB, // 32x32 dct transform
TX_SIZE_MAX_SB, // Number of transforms available to SBs
#else
TX_SIZE_MAX_SB = TX_SIZE_MAX_MB,
#endif
} TX_SIZE;
typedef enum {
......@@ -302,6 +308,15 @@ typedef struct blockd {
union b_mode_info bmi;
} BLOCKD;
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
typedef struct superblockd {
/* 32x32 Y and 16x16 U/V. No 2nd order transform yet. */
DECLARE_ALIGNED(16, short, diff[32*32+16*16*2]);
DECLARE_ALIGNED(16, short, qcoeff[32*32+16*16*2]);
DECLARE_ALIGNED(16, short, dqcoeff[32*32+16*16*2]);
} SUPERBLOCKD;
#endif
typedef struct macroblockd {
DECLARE_ALIGNED(16, short, diff[400]); /* from idct diff */
DECLARE_ALIGNED(16, unsigned char, predictor[384]);
......@@ -309,6 +324,10 @@ typedef struct macroblockd {
DECLARE_ALIGNED(16, short, dqcoeff[400]);
DECLARE_ALIGNED(16, unsigned short, eobs[25]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
SUPERBLOCKD sb_coeff_data;
#endif
/* 16 Y blocks, 4 U, 4 V, 1 DC 2nd order block, each with 16 entries. */
BLOCKD block[25];
int fullpixel_mask;
......
......@@ -1375,3 +1375,5 @@ static const vp9_prob
}
}
};
#define default_coef_probs_32x32 default_coef_probs_16x16
This diff is collapsed.
......@@ -55,7 +55,7 @@ extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
#define PROB_UPDATE_BASELINE_COST 7
#define MAX_PROB 255
#define DCT_MAX_VALUE 8192
#define DCT_MAX_VALUE 16384
/* Coefficients are predicted via a 3-dimensional probability table. */
......@@ -66,6 +66,10 @@ extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
#define BLOCK_TYPES_16X16 4
#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
#define BLOCK_TYPES_32X32 4
#endif
/* Middle dimension is a coarsening of the coefficient's
position within the 4x4 DCT. */
......@@ -73,6 +77,9 @@ extern vp9_extra_bit_struct vp9_extra_bits[12]; /* indexed by token value */
extern DECLARE_ALIGNED(16, const int, vp9_coef_bands[16]);
extern DECLARE_ALIGNED(64, const int, vp9_coef_bands_8x8[64]);
extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_16x16[256]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
extern DECLARE_ALIGNED(16, const int, vp9_coef_bands_32x32[1024]);
#endif
/* Inside dimension is 3-valued measure of nearby complexity, that is,
the extent to which nearby coefficients are nonzero. For the first
......@@ -106,9 +113,13 @@ extern DECLARE_ALIGNED(16, const int, vp9_col_scan[16]);
extern DECLARE_ALIGNED(16, const int, vp9_row_scan[16]);
extern DECLARE_ALIGNED(64, const int, vp9_default_zig_zag1d_8x8[64]);
extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]);
#if CONFIG_SUPERBLOCKS && CONFIG_TX32X32
extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_32x32[1024]);
#endif
void vp9_coef_tree_initialize(void);
extern DECLARE_ALIGNED(16, const int, vp9_default_zig_zag1d_16x16[256]);
void vp9_adapt_coef_probs(struct VP9Common *);
#endif
This diff is collapsed.
......@@ -143,3 +143,16 @@ void vp9_inverse_transform_mb_16x16(MACROBLOCKD *xd) {
vp9_inverse_transform_mby_16x16(xd);
vp9_inverse_transform_mbuv_8x8(xd);
}
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
void vp9_inverse_transform_sby_32x32(SUPERBLOCKD *xd_sb) {
vp9_short_idct32x32(xd_sb->dqcoeff, xd_sb->diff, 64);
}
void vp9_inverse_transform_sbuv_16x16(SUPERBLOCKD *xd_sb) {
vp9_inverse_transform_b_16x16(xd_sb->dqcoeff + 1024,
xd_sb->diff + 1024, 32);
vp9_inverse_transform_b_16x16(xd_sb->dqcoeff + 1280,
xd_sb->diff + 1280, 32);
}
#endif
......@@ -38,4 +38,9 @@ extern void vp9_inverse_transform_mb_16x16(MACROBLOCKD *xd);
extern void vp9_inverse_transform_mby_16x16(MACROBLOCKD *xd);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
extern void vp9_inverse_transform_sby_32x32(SUPERBLOCKD *xd_sb);
extern void vp9_inverse_transform_sbuv_16x16(SUPERBLOCKD *xd_sb);
#endif
#endif // __INC_INVTRANS_H
......@@ -192,6 +192,9 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
/* Point at base of Mb MODE_INFO list */
const MODE_INFO *mode_info_context = cm->mi;
#if CONFIG_SUPERBLOCKS
const int mis = cm->mode_info_stride;
#endif
/* Initialize the loop filter for this frame. */
vp9_loop_filter_frame_init(cm, xd, cm->filter_level);
......@@ -226,14 +229,18 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
if (mb_col > 0
#if CONFIG_SUPERBLOCKS
&& !((mb_col & 1) && mode_info_context->mbmi.encoded_as_sb &&
mode_info_context[0].mbmi.mb_skip_coeff &&
((mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-1].mbmi.mb_skip_coeff)
#if CONFIG_TX32X32
|| mode_info_context[-1].mbmi.txfm_size == TX_32X32
#endif
))
#endif
)
vp9_loop_filter_mbv(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
if (!skip_lf && tx_type != TX_16X16) {
if (!skip_lf && tx_type < TX_16X16) {
if (tx_type == TX_8X8)
vp9_loop_filter_bv8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
......@@ -247,14 +254,18 @@ void vp9_loop_filter_frame(VP9_COMMON *cm, MACROBLOCKD *xd) {
if (mb_row > 0
#if CONFIG_SUPERBLOCKS
&& !((mb_row & 1) && mode_info_context->mbmi.encoded_as_sb &&
mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-cm->mode_info_stride].mbmi.mb_skip_coeff)
((mode_info_context[0].mbmi.mb_skip_coeff &&
mode_info_context[-mis].mbmi.mb_skip_coeff)
#if CONFIG_TX32X32
|| mode_info_context[-mis].mbmi.txfm_size == TX_32X32
#endif
))
#endif
)
vp9_loop_filter_mbh(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
if (!skip_lf && tx_type != TX_16X16) {
if (!skip_lf && tx_type < TX_16X16) {
if (tx_type == TX_8X8)
vp9_loop_filter_bh8x8(y_ptr, u_ptr, v_ptr, post->y_stride,
post->uv_stride, &lfi);
......
......@@ -58,6 +58,9 @@ typedef struct frame_contexts {
vp9_prob hybrid_coef_probs_8x8 [BLOCK_TYPES_8X8] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
vp9_prob coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
vp9_prob hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
vp9_prob coef_probs_32x32 [BLOCK_TYPES_32X32] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
nmv_context nmvc;
nmv_context pre_nmvc;
......@@ -95,6 +98,11 @@ typedef struct frame_contexts {
vp9_prob pre_hybrid_coef_probs_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
vp9_prob pre_coef_probs_32x32 [BLOCK_TYPES_32X32] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [ENTROPY_NODES];
#endif
unsigned int coef_counts [BLOCK_TYPES] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
unsigned int hybrid_coef_counts [BLOCK_TYPES] [COEF_BANDS]
......@@ -110,6 +118,11 @@ typedef struct frame_contexts {
unsigned int hybrid_coef_counts_16x16 [BLOCK_TYPES_16X16] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
unsigned int coef_counts_32x32 [BLOCK_TYPES_32X32] [COEF_BANDS]
[PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS];
#endif
nmv_context_counts NMVcount;
vp9_prob switchable_interp_prob[VP9_SWITCHABLE_FILTERS + 1]
[VP9_SWITCHABLE_FILTERS - 1];
......@@ -139,8 +152,11 @@ typedef enum {
ONLY_4X4 = 0,
ALLOW_8X8 = 1,
ALLOW_16X16 = 2,
TX_MODE_SELECT = 3,
NB_TXFM_MODES = 4,
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
ALLOW_32X32 = 3,
#endif
TX_MODE_SELECT = 3 + (CONFIG_TX32X32 && CONFIG_SUPERBLOCKS),
NB_TXFM_MODES = 4 + (CONFIG_TX32X32 && CONFIG_SUPERBLOCKS),
} TXFM_MODE;
typedef struct VP9Common {
......@@ -268,7 +284,7 @@ typedef struct VP9Common {
vp9_prob prob_comppred[COMP_PRED_CONTEXTS];
// FIXME contextualize
vp9_prob prob_tx[TX_SIZE_MAX - 1];
vp9_prob prob_tx[TX_SIZE_MAX_SB - 1];
vp9_prob mbskip_pred_probs[MBSKIP_CONTEXTS];
......
......@@ -168,6 +168,53 @@ void vp9_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
}
}
}
#if CONFIG_TX32X32
void vp9_recon_sby_s_c(MACROBLOCKD *xd, uint8_t *dst) {
int x, y, stride = xd->block[0].dst_stride;
short *diff = xd->sb_coeff_data.diff;
for (y = 0; y < 32; y++) {
for (x = 0; x < 32; x++) {
int a = dst[x] + diff[x];
if (a < 0)
a = 0;
else if (a > 255)
a = 255;
dst[x] = a;
}
dst += stride;
diff += 32;
}
}
void vp9_recon_sbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
int x, y, stride = xd->block[16].dst_stride;
short *udiff = xd->sb_coeff_data.diff + 1024;
short *vdiff = xd->sb_coeff_data.diff + 1280;
for (y = 0; y < 16; y++) {
for (x = 0; x < 16; x++) {
int u = udst[x] + udiff[x];
int v = vdst[x] + vdiff[x];
if (u < 0)
u = 0;
else if (u > 255)
u = 255;
if (v < 0)
v = 0;
else if (v > 255)
v = 255;
udst[x] = u;
vdst[x] = v;
}
udst += stride;
vdst += stride;
udiff += 16;
vdiff += 16;
}
}
#endif
#endif
void vp9_recon_mby_c(MACROBLOCKD *xd) {
......
......@@ -361,6 +361,9 @@ specialize vp9_short_idct16x16
prototype void vp9_short_idct10_16x16 "short *input, short *output, int pitch"
specialize vp9_short_idct10_16x16
prototype void vp9_short_idct32x32 "short *input, short *output, int pitch"
specialize vp9_short_idct32x32
prototype void vp9_ihtllm "const short *input, short *output, int pitch, int tx_type, int tx_dim"
specialize vp9_ihtllm
......@@ -640,6 +643,9 @@ specialize vp9_short_fdct8x4
prototype void vp9_short_walsh4x4 "short *InputData, short *OutputData, int pitch"
specialize vp9_short_walsh4x4
prototype void vp9_short_fdct32x32 "short *InputData, short *OutputData, int pitch"
specialize vp9_short_fdct32x32
prototype void vp9_short_fdct16x16 "short *InputData, short *OutputData, int pitch"
specialize vp9_short_fdct16x16
......
......@@ -14,7 +14,7 @@
static const int segfeaturedata_signed[SEG_LVL_MAX] = { 1, 1, 0, 0, 0, 0 };
static const int seg_feature_data_max[SEG_LVL_MAX] =
{ MAXQ, 63, 0xf, MB_MODE_COUNT - 1, 255, TX_SIZE_MAX - 1};
{ MAXQ, 63, 0xf, MB_MODE_COUNT - 1, 255, TX_SIZE_MAX_SB - 1};
// These functions provide access to new segment level features.
// Eventually these function may be "optimized out" but for the moment,
......
......@@ -209,8 +209,17 @@ static void kfread_modes(VP9D_COMP *pbi,
m->mbmi.mode <= I8X8_PRED) {
// FIXME(rbultje) code ternary symbol once all experiments are merged
m->mbmi.txfm_size = vp9_read(bc, cm->prob_tx[0]);
if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED)
if (m->mbmi.txfm_size != TX_4X4 && m->mbmi.mode != I8X8_PRED) {
m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[1]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
if (m->mbmi.txfm_size != TX_8X8 && m->mbmi.encoded_as_sb)
m->mbmi.txfm_size += vp9_read(bc, cm->prob_tx[2]);
#endif
}
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
} else if (cm->txfm_mode >= ALLOW_32X32 && m->mbmi.encoded_as_sb) {
m->mbmi.txfm_size = TX_32X32;
#endif
} else if (cm->txfm_mode >= ALLOW_16X16 && m->mbmi.mode <= TM_PRED) {
m->mbmi.txfm_size = TX_16X16;
} else if (cm->txfm_mode >= ALLOW_8X8 && m->mbmi.mode != B_PRED) {
......@@ -1219,8 +1228,17 @@ static void read_mb_modes_mv(VP9D_COMP *pbi, MODE_INFO *mi, MB_MODE_INFO *mbmi,
// FIXME(rbultje) code ternary symbol once all experiments are merged
mbmi->txfm_size = vp9_read(bc, cm->prob_tx[0]);
if (mbmi->txfm_size != TX_4X4 && mbmi->mode != I8X8_PRED &&
mbmi->mode != SPLITMV)
mbmi->mode != SPLITMV) {
mbmi->txfm_size += vp9_read(bc, cm->prob_tx[1]);
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
if (mbmi->encoded_as_sb && mbmi->txfm_size != TX_8X8)
mbmi->txfm_size += vp9_read(bc, cm->prob_tx[2]);
#endif
}
#if CONFIG_TX32X32 && CONFIG_SUPERBLOCKS
} else if (mbmi->encoded_as_sb && cm->txfm_mode >= ALLOW_32X32) {
mbmi->txfm_size = TX_32X32;
#endif
} else if (cm->txfm_mode >= ALLOW_16X16 &&
((mbmi->ref_frame == INTRA_FRAME && mbmi->mode <= TM_PRED) ||
(mbmi->ref_frame != INTRA_FRAME && mbmi->mode != SPLITMV))) {
......
......@@ -693,6 +693,7 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
TX_SIZE tx_size = xd->mode_info_context->mbmi.txfm_size;
VP9_COMMON *const pc = &pbi->common;
MODE_INFO *orig_mi = xd->mode_info_context;
const int mis = pc->mode_info_stride;
assert(xd->mode_info_context->mbmi.encoded_as_sb);
......@@ -733,6 +734,30 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
}
/* dequantization and idct */
#if CONFIG_TX32X32
if (xd->mode_info_context->mbmi.txfm_size == TX_32X32) {
eobtotal = vp9_decode_sb_tokens(pbi, xd, bc);
if (eobtotal == 0) { // skip loopfilter
xd->mode_info_context->mbmi.mb_skip_coeff = 1;
if (mb_col + 1 < pc->mb_cols)
xd->mode_info_context[1].mbmi.mb_skip_coeff = 1;
if (mb_row + 1 < pc->mb_rows) {
xd->mode_info_context[mis].mbmi.mb_skip_coeff = 1;
if (mb_col + 1 < pc->mb_cols)
xd->mode_info_context[mis + 1].mbmi.mb_skip_coeff = 1;
}
} else {
vp9_dequant_idct_add_32x32(xd->sb_coeff_data.qcoeff, xd->block[0].dequant,
xd->dst.y_buffer, xd->dst.y_buffer,
xd->dst.y_stride, xd->dst.y_stride,
xd->eobs[0]);
vp9_dequant_idct_add_uv_block_16x16_c(xd->sb_coeff_data.qcoeff + 1024,
xd->block[16].dequant,
xd->dst.u_buffer, xd->dst.v_buffer,
xd->dst.uv_stride, xd->eobs + 16);
}
} else {
#endif
for (n = 0; n < 4; n++) {
int x_idx = n & 1, y_idx = n >> 1;
......@@ -742,7 +767,7 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->above_context = pc->above_context + mb_col + x_idx;
xd->left_context = pc->left_context + y_idx;
xd->mode_info_context = orig_mi + x_idx + y_idx * pc->mode_info_stride;
xd->mode_info_context = orig_mi + x_idx + y_idx * mis;
for (i = 0; i < 25; i++) {
xd->block[i].eob = 0;
xd->eobs[i] = 0;
......@@ -766,6 +791,9 @@ static void decode_superblock(VP9D_COMP *pbi, MACROBLOCKD *xd,
xd->above_context = pc->above_context + mb_col;
xd->left_context = pc->left_context;
xd->mode_info_context = orig_mi;
#if CONFIG_TX32X32
}
#endif
}
#endif
......@@ -1244,6 +1272,11 @@ static void read_coef_probs(VP9D_COMP *pbi, BOOL_DECODER* const bc) {
read_coef_probs_common(bc, pc->fc.coef_probs_16x16);