Commit a3874850 authored by Ronald S. Bultje's avatar Ronald S. Bultje

Make SB coding size-independent.

Merge the sb32x32 and sb64x64 functions and allow for rectangular sizes. The
code gives identical encoder results before and after. There are a few
macros for rectangular block sizes under the sbsegment experiment; this
experiment is not yet functional and should not yet be used.

Change-Id: I71f93b5d2a1596e99a6f01f29c3f0a456694d728
parent f42bee7e
......@@ -252,6 +252,7 @@ EXPERIMENT_LIST="
implicit_compoundinter_weight
scatterscan
oneshotq
sbsegment
"
CONFIG_LIST="
external_build
......
......@@ -19,6 +19,7 @@
#include "vp9/common/vp9_treecoder.h"
#include "vpx_ports/mem.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_enums.h"
#define TRUE 1
#define FALSE 0
......@@ -198,11 +199,43 @@ typedef enum {
MAX_REF_FRAMES = 4
} MV_REFERENCE_FRAME;
typedef enum {
BLOCK_SIZE_MB16X16 = 0,
BLOCK_SIZE_SB32X32 = 1,
BLOCK_SIZE_SB64X64 = 2,
} BLOCK_SIZE_TYPE;
// Returns the block's width in 16x16 macroblock units, as a log2 value:
// 16 pixels wide -> 0, 32 -> 1, 64 -> 2.
static INLINE int mb_width_log2(BLOCK_SIZE_TYPE sb_type) {
  switch (sb_type) {
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB16X32:
#endif
    case BLOCK_SIZE_MB16X16: return 0;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X16:
    case BLOCK_SIZE_SB32X64:
#endif
    case BLOCK_SIZE_SB32X32: return 1;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB64X32:
#endif
    case BLOCK_SIZE_SB64X64: return 2;
    default:
      assert(0);
      // Unreachable for valid input; without this return the function falls
      // off the end in NDEBUG builds, which is undefined behavior when the
      // caller uses the result.
      return -1;
  }
}
// Returns the block's height in 16x16 macroblock units, as a log2 value:
// 16 pixels tall -> 0, 32 -> 1, 64 -> 2.
static INLINE int mb_height_log2(BLOCK_SIZE_TYPE sb_type) {
  switch (sb_type) {
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X16:
#endif
    case BLOCK_SIZE_MB16X16: return 0;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB16X32:
    case BLOCK_SIZE_SB64X32:
#endif
    case BLOCK_SIZE_SB32X32: return 1;
#if CONFIG_SBSEGMENT
    case BLOCK_SIZE_SB32X64:
#endif
    case BLOCK_SIZE_SB64X64: return 2;
    default:
      assert(0);
      // Unreachable for valid input; without this return the function falls
      // off the end in NDEBUG builds, which is undefined behavior when the
      // caller uses the result.
      return -1;
  }
}
typedef struct {
MB_PREDICTION_MODE mode, uv_mode;
......@@ -469,11 +502,12 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
// is smaller than the prediction size
TX_TYPE tx_type = DCT_DCT;
const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
const int wb = mb_width_log2(sb_type), hb = mb_height_log2(sb_type);
#if !USE_ADST_FOR_SB
if (sb_type)
if (sb_type > BLOCK_SIZE_MB16X16)
return tx_type;
#endif
if (ib >= (16 << (2 * sb_type))) // no chroma adst
if (ib >= (16 << (wb + hb))) // no chroma adst
return tx_type;
if (xd->lossless)
return DCT_DCT;
......@@ -524,7 +558,7 @@ static TX_TYPE get_tx_type_4x4(const MACROBLOCKD *xd, int ib) {
xd->q_index < ACTIVE_HT) {
#if USE_ADST_FOR_I16X16_4X4
#if USE_ADST_PERIPHERY_ONLY
const int hmax = 4 << sb_type;
const int hmax = 4 << wb;
tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_FOR_REMOTE_EDGE
if ((ib & (hmax - 1)) != 0 && ib >= hmax)
......@@ -557,11 +591,12 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
// is smaller than the prediction size
TX_TYPE tx_type = DCT_DCT;
const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
const int wb = mb_width_log2(sb_type), hb = mb_height_log2(sb_type);
#if !USE_ADST_FOR_SB
if (sb_type)
if (sb_type > BLOCK_SIZE_MB16X16)
return tx_type;
#endif
if (ib >= (16 << (2 * sb_type))) // no chroma adst
if (ib >= (16 << (wb + hb))) // no chroma adst
return tx_type;
if (xd->mode_info_context->mbmi.mode == I8X8_PRED &&
xd->q_index < ACTIVE_HT8) {
......@@ -574,7 +609,7 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
xd->q_index < ACTIVE_HT8) {
#if USE_ADST_FOR_I16X16_8X8
#if USE_ADST_PERIPHERY_ONLY
const int hmax = 4 << sb_type;
const int hmax = 4 << wb;
tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_FOR_REMOTE_EDGE
if ((ib & (hmax - 1)) != 0 && ib >= hmax)
......@@ -605,18 +640,19 @@ static TX_TYPE get_tx_type_8x8(const MACROBLOCKD *xd, int ib) {
static TX_TYPE get_tx_type_16x16(const MACROBLOCKD *xd, int ib) {
TX_TYPE tx_type = DCT_DCT;
const BLOCK_SIZE_TYPE sb_type = xd->mode_info_context->mbmi.sb_type;
const int wb = mb_width_log2(sb_type), hb = mb_height_log2(sb_type);
#if !USE_ADST_FOR_SB
if (sb_type)
if (sb_type > BLOCK_SIZE_MB16X16)
return tx_type;
#endif
if (ib >= (16 << (2 * sb_type)))
if (ib >= (16 << (wb + hb)))
return tx_type;
if (xd->mode_info_context->mbmi.mode < I8X8_PRED &&
xd->q_index < ACTIVE_HT16) {
tx_type = txfm_map(pred_mode_conv(xd->mode_info_context->mbmi.mode));
#if USE_ADST_PERIPHERY_ONLY
if (sb_type) {
const int hmax = 4 << sb_type;
if (sb_type > BLOCK_SIZE_MB16X16) {
const int hmax = 4 << wb;
#if USE_ADST_FOR_REMOTE_EDGE
if ((ib & (hmax - 1)) != 0 && ib >= hmax)
tx_type = DCT_DCT;
......@@ -658,6 +694,10 @@ static TX_SIZE get_uv_tx_size(const MACROBLOCKD *xd) {
switch (mbmi->sb_type) {
case BLOCK_SIZE_SB64X64:
return size;
#if CONFIG_SBSEGMENT
case BLOCK_SIZE_SB64X32:
case BLOCK_SIZE_SB32X64:
#endif
case BLOCK_SIZE_SB32X32:
if (size == TX_32X32)
return TX_16X16;
......
......@@ -122,16 +122,12 @@ static INLINE void vp9_reset_mb_tokens_context(MACROBLOCKD* const xd) {
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES));
}
static INLINE void vp9_reset_sb_tokens_context(MACROBLOCKD* const xd) {
static INLINE void vp9_reset_sb_tokens_context(MACROBLOCKD* const xd,
BLOCK_SIZE_TYPE bsize) {
/* Clear entropy contexts */
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 2);
}
static INLINE void vp9_reset_sb64_tokens_context(MACROBLOCKD* const xd) {
/* Clear entropy contexts */
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 4);
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * 4);
const int bw = 1 << mb_width_log2(bsize), bh = 1 << mb_height_log2(bsize);
vpx_memset(xd->above_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * bw);
vpx_memset(xd->left_context, 0, sizeof(ENTROPY_CONTEXT_PLANES) * bh);
}
extern const int vp9_coef_bands8x8[64];
......
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VP9_COMMON_VP9_ENUMS_H_
#define VP9_COMMON_VP9_ENUMS_H_
#include "./vpx_config.h"
// Coding-unit sizes (width x height in pixels). The rectangular sizes are
// only compiled in under the sbsegment experiment, which per the commit
// message is not yet functional. NOTE: enumerator ordering/values matter to
// code that compares sizes (e.g. `sb_type > BLOCK_SIZE_MB16X16`), so do not
// reorder.
typedef enum BLOCK_SIZE_TYPE {
  BLOCK_SIZE_MB16X16,   // 16x16 macroblock
#if CONFIG_SBSEGMENT
  BLOCK_SIZE_SB16X32,   // 16 wide, 32 tall
  BLOCK_SIZE_SB32X16,   // 32 wide, 16 tall
#endif
  BLOCK_SIZE_SB32X32,   // 32x32 superblock
#if CONFIG_SBSEGMENT
  BLOCK_SIZE_SB32X64,   // 32 wide, 64 tall
  BLOCK_SIZE_SB64X32,   // 64 wide, 32 tall
#endif
  BLOCK_SIZE_SB64X64,   // 64x64 superblock
} BLOCK_SIZE_TYPE;
#endif // VP9_COMMON_VP9_ENUMS_H_
This diff is collapsed.
......@@ -41,21 +41,13 @@ void vp9_inverse_transform_mb_16x16(MACROBLOCKD *xd);
void vp9_inverse_transform_mby_16x16(MACROBLOCKD *xd);
void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd);
void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd);
void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd);
void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd);
void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd);
void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd);
void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64y_32x32(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64y_16x16(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64y_8x8(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64y_4x4(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64uv_32x32(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64uv_16x16(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64uv_8x8(MACROBLOCKD *xd);
void vp9_inverse_transform_sb64uv_4x4(MACROBLOCKD *xd);
void vp9_inverse_transform_sby_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sby_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_32x32(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_16x16(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_8x8(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
void vp9_inverse_transform_sbuv_4x4(MACROBLOCKD *xd, BLOCK_SIZE_TYPE bsize);
#endif // VP9_COMMON_VP9_INVTRANS_H_
......@@ -84,56 +84,45 @@ void vp9_recon_mbuv_s_c(MACROBLOCKD *xd, uint8_t *udst, uint8_t *vdst) {
}
}
static INLINE void recon_sby(MACROBLOCKD *mb, uint8_t *dst, int size) {
void vp9_recon_sby_s_c(MACROBLOCKD *mb, uint8_t *dst,
BLOCK_SIZE_TYPE bsize) {
const int bw = 16 << mb_width_log2(bsize), bh = 16 << mb_height_log2(bsize);
int x, y;
const int stride = mb->block[0].dst_stride;
const int16_t *diff = mb->diff;
for (y = 0; y < size; y++) {
for (x = 0; x < size; x++)
for (y = 0; y < bh; y++) {
for (x = 0; x < bw; x++)
dst[x] = clip_pixel(dst[x] + diff[x]);
dst += stride;
diff += size;
diff += bw;
}
}
static INLINE void recon_sbuv(MACROBLOCKD *mb, uint8_t *u_dst, uint8_t *v_dst,
int y_offset, int size) {
void vp9_recon_sbuv_s_c(MACROBLOCKD *mb, uint8_t *u_dst, uint8_t *v_dst,
BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bhl = mb_height_log2(bsize);
const int uoff = (16 * 16) << (bwl + bhl), voff = (uoff * 5) >> 2;
const int bw = 8 << bwl, bh = 8 << bhl;
int x, y;
const int stride = mb->block[16].dst_stride;
const int16_t *u_diff = mb->diff + y_offset;
const int16_t *v_diff = mb->diff + y_offset + size*size;
const int16_t *u_diff = mb->diff + uoff;
const int16_t *v_diff = mb->diff + voff;
for (y = 0; y < size; y++) {
for (x = 0; x < size; x++) {
for (y = 0; y < bh; y++) {
for (x = 0; x < bw; x++) {
u_dst[x] = clip_pixel(u_dst[x] + u_diff[x]);
v_dst[x] = clip_pixel(v_dst[x] + v_diff[x]);
}
u_dst += stride;
v_dst += stride;
u_diff += size;
v_diff += size;
u_diff += bw;
v_diff += bw;
}
}
void vp9_recon_sby_s_c(MACROBLOCKD *mb, uint8_t *dst) {
recon_sby(mb, dst, 32);
}
void vp9_recon_sbuv_s_c(MACROBLOCKD *mb, uint8_t *u_dst, uint8_t *v_dst) {
recon_sbuv(mb, u_dst, v_dst, 1024, 16);
}
void vp9_recon_sb64y_s_c(MACROBLOCKD *mb, uint8_t *dst) {
recon_sby(mb, dst, 64);
}
void vp9_recon_sb64uv_s_c(MACROBLOCKD *mb, uint8_t *u_dst, uint8_t *v_dst) {
recon_sbuv(mb, u_dst, v_dst, 4096, 32);
}
void vp9_recon_mby_c(MACROBLOCKD *xd) {
int i;
......
......@@ -5,6 +5,7 @@ cat <<EOF
*/
#include "vpx/vpx_integer.h"
#include "vp9/common/vp9_enums.h"
struct loop_filter_info;
struct blockd;
......@@ -85,18 +86,12 @@ specialize vp9_recon_mby_s
prototype void vp9_recon_mbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
specialize void vp9_recon_mbuv_s
prototype void vp9_recon_sby_s "struct macroblockd *x, uint8_t *dst"
prototype void vp9_recon_sby_s "struct macroblockd *x, uint8_t *dst, enum BLOCK_SIZE_TYPE bsize"
specialize vp9_recon_sby_s
prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
prototype void vp9_recon_sbuv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst, enum BLOCK_SIZE_TYPE bsize"
specialize void vp9_recon_sbuv_s
prototype void vp9_recon_sb64y_s "struct macroblockd *x, uint8_t *dst"
specialize vp9_recon_sb64y_s
prototype void vp9_recon_sb64uv_s "struct macroblockd *x, uint8_t *udst, uint8_t *vdst"
specialize void vp9_recon_sb64uv_s
prototype void vp9_build_intra_predictors_mby_s "struct macroblockd *x"
specialize vp9_build_intra_predictors_mby_s
......
......@@ -675,7 +675,7 @@ static void decode_sb64(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
mb_init_dequantizer(pbi, xd);
if (mi->mbmi.mb_skip_coeff) {
vp9_reset_sb64_tokens_context(xd);
vp9_reset_sb_tokens_context(xd, BLOCK_SIZE_SB64X64);
// Special case: Force the loopfilter to skip when eobtotal and
// mb_skip_coeff are zero.
......@@ -753,7 +753,7 @@ static void decode_sb32(VP9D_COMP *pbi, MACROBLOCKD *xd, int mb_row, int mb_col,
mb_init_dequantizer(pbi, xd);
if (mi->mbmi.mb_skip_coeff) {
vp9_reset_sb_tokens_context(xd);
vp9_reset_sb_tokens_context(xd, BLOCK_SIZE_SB32X32);
// Special case: Force the loopfilter to skip when eobtotal and
// mb_skip_coeff are zero.
......
......@@ -163,8 +163,16 @@ struct macroblock {
// Structure to hold context for each of the 4 MBs within a SB:
// when encoded as 4 independent MBs:
PICK_MODE_CONTEXT mb_context[4][4];
#if CONFIG_SBSEGMENT
PICK_MODE_CONTEXT sb32x16_context[4][2];
PICK_MODE_CONTEXT sb16x32_context[4][2];
#endif
// when 4 MBs share coding parameters:
PICK_MODE_CONTEXT sb32_context[4];
#if CONFIG_SBSEGMENT
PICK_MODE_CONTEXT sb32x64_context[2];
PICK_MODE_CONTEXT sb64x32_context[2];
#endif
PICK_MODE_CONTEXT sb64_context;
void (*fwd_txm4x4)(int16_t *input, int16_t *output, int pitch);
......
This diff is collapsed.
This diff is collapsed.
......@@ -44,37 +44,30 @@ void vp9_transform_mb_16x16(MACROBLOCK *mb);
void vp9_transform_mby_16x16(MACROBLOCK *x);
void vp9_optimize_mby_16x16(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sby_32x32(MACROBLOCK *x);
void vp9_optimize_sby_32x32(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sby_16x16(MACROBLOCK *x);
void vp9_optimize_sby_16x16(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sby_8x8(MACROBLOCK *x);
void vp9_optimize_sby_8x8(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sby_4x4(MACROBLOCK *x);
void vp9_optimize_sby_4x4(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sbuv_16x16(MACROBLOCK *x);
void vp9_optimize_sbuv_16x16(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sbuv_8x8(MACROBLOCK *x);
void vp9_optimize_sbuv_8x8(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sbuv_4x4(MACROBLOCK *x);
void vp9_optimize_sbuv_4x4(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64y_32x32(MACROBLOCK *x);
void vp9_optimize_sb64y_32x32(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64y_16x16(MACROBLOCK *x);
void vp9_optimize_sb64y_16x16(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64y_8x8(MACROBLOCK *x);
void vp9_optimize_sb64y_8x8(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64y_4x4(MACROBLOCK *x);
void vp9_optimize_sb64y_4x4(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64uv_32x32(MACROBLOCK *x);
void vp9_optimize_sb64uv_32x32(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64uv_16x16(MACROBLOCK *x);
void vp9_optimize_sb64uv_16x16(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64uv_8x8(MACROBLOCK *x);
void vp9_optimize_sb64uv_8x8(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sb64uv_4x4(MACROBLOCK *x);
void vp9_optimize_sb64uv_4x4(VP9_COMMON *const cm, MACROBLOCK *x);
void vp9_transform_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sby_32x32(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sby_16x16(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sby_8x8(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sby_4x4(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sbuv_32x32(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sbuv_16x16(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sbuv_8x8(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_transform_sbuv_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize);
void vp9_optimize_sbuv_4x4(VP9_COMMON *const cm, MACROBLOCK *x,
BLOCK_SIZE_TYPE bsize);
void vp9_fidct_mb(VP9_COMMON *const cm, MACROBLOCK *x);
......@@ -88,16 +81,12 @@ void vp9_subtract_mby_s_c(int16_t *diff, const uint8_t *src,
int src_stride, const uint8_t *pred,
int dst_stride);
void vp9_subtract_sby_s_c(int16_t *diff, const uint8_t *src, int src_stride,
const uint8_t *pred, int dst_stride);
const uint8_t *pred, int dst_stride,
BLOCK_SIZE_TYPE bsize);
void vp9_subtract_sbuv_s_c(int16_t *diff, const uint8_t *usrc,
const uint8_t *vsrc, int src_stride,
const uint8_t *upred,
const uint8_t *vpred, int dst_stride);
void vp9_subtract_sb64y_s_c(int16_t *diff, const uint8_t *src, int src_stride,
const uint8_t *pred, int dst_stride);
void vp9_subtract_sb64uv_s_c(int16_t *diff, const uint8_t *usrc,
const uint8_t *vsrc, int src_stride,
const uint8_t *upred,
const uint8_t *vpred, int dst_stride);
const uint8_t *vpred, int dst_stride,
BLOCK_SIZE_TYPE bsize);
#endif // VP9_ENCODER_VP9_ENCODEMB_H_
......@@ -496,128 +496,93 @@ void vp9_regular_quantize_b_32x32(MACROBLOCK *mb, int b_idx, int y_blocks) {
vp9_default_zig_zag1d_32x32, 2);
}
void vp9_quantize_sby_32x32(MACROBLOCK *x) {
vp9_regular_quantize_b_32x32(x, 0, 64);
}
void vp9_quantize_sby_16x16(MACROBLOCK *x) {
int n;
for (n = 0; n < 4; n++) {
TX_TYPE tx_type = get_tx_type_16x16(&x->e_mbd,
(16 * (n & 2)) + ((n & 1) * 4));
x->quantize_b_16x16(x, n * 16, tx_type, 64);
}
}
void vp9_quantize_sby_8x8(MACROBLOCK *x) {
int n;
for (n = 0; n < 16; n++) {
TX_TYPE tx_type = get_tx_type_8x8(&x->e_mbd,
(4 * (n & 12)) + ((n & 3) * 2));
x->quantize_b_8x8(x, n * 4, tx_type, 64);
}
}
void vp9_quantize_sby_4x4(MACROBLOCK *x) {
MACROBLOCKD *const xd = &x->e_mbd;
int n;
for (n = 0; n < 64; n++) {
const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
if (tx_type != DCT_DCT) {
vp9_ht_quantize_b_4x4(x, n, tx_type);
} else {
x->quantize_b_4x4(x, n, 64);
}
}
}
void vp9_quantize_sbuv_16x16(MACROBLOCK *x) {
x->quantize_b_16x16(x, 64, DCT_DCT, 64);
x->quantize_b_16x16(x, 80, DCT_DCT, 64);
}
void vp9_quantize_sbuv_8x8(MACROBLOCK *x) {
int i;
for (i = 64; i < 96; i += 4)
x->quantize_b_8x8(x, i, DCT_DCT, 64);
}
void vp9_quantize_sbuv_4x4(MACROBLOCK *x) {
int i;
for (i = 64; i < 96; i++)
x->quantize_b_4x4(x, i, 64);
}
void vp9_quantize_sb64y_32x32(MACROBLOCK *x) {
void vp9_quantize_sby_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bw = 1 << (mb_width_log2(bsize) - 1);
const int bh = 1 << (mb_height_log2(bsize) - 1);
int n;
for (n = 0; n < 4; n++)
vp9_regular_quantize_b_32x32(x, n * 64, 256);
for (n = 0; n < bw * bh; n++)
vp9_regular_quantize_b_32x32(x, n * 64, bw * bh * 64);
}
void vp9_quantize_sb64y_16x16(MACROBLOCK *x) {
void vp9_quantize_sby_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize), bw = 1 << bwl;
const int bh = 1 << mb_height_log2(bsize);
const int bstride = 16 << bwl;
int n;
for (n = 0; n < 16; n++) {
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
TX_TYPE tx_type = get_tx_type_16x16(&x->e_mbd,
(16 * (n & 12)) + ((n & 3) * 4));
x->quantize_b_16x16(x, n * 16, tx_type, 256);
4 * x_idx + y_idx * bstride);
x->quantize_b_16x16(x, n * 16, tx_type, 16 * bw * bh);
}
}
void vp9_quantize_sb64y_8x8(MACROBLOCK *x) {
void vp9_quantize_sby_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize) + 1, bw = 1 << bwl;
const int bh = 1 << (mb_height_log2(bsize) + 1);
const int bstride = 4 << bwl;
int n;
for (n = 0; n < 64; n++) {
for (n = 0; n < bw * bh; n++) {
const int x_idx = n & (bw - 1), y_idx = n >> bwl;
TX_TYPE tx_type = get_tx_type_8x8(&x->e_mbd,
(4 * (n & 56)) + ((n & 7) * 2));
x->quantize_b_8x8(x, n * 4, tx_type, 256);
2 * x_idx + y_idx * bstride);
x->quantize_b_8x8(x, n * 4, tx_type, 4 * bw * bh);
}
}
void vp9_quantize_sb64y_4x4(MACROBLOCK *x) {
void vp9_quantize_sby_4x4(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize) + 2, bw = 1 << bwl;
const int bh = 1 << (mb_height_log2(bsize) + 2);
MACROBLOCKD *const xd = &x->e_mbd;
int n;
for (n = 0; n < 256; n++) {
for (n = 0; n < bw * bh; n++) {
const TX_TYPE tx_type = get_tx_type_4x4(xd, n);
if (tx_type != DCT_DCT) {
vp9_ht_quantize_b_4x4(x, n, tx_type);
} else {
x->quantize_b_4x4(x, n, 256);
x->quantize_b_4x4(x, n, bw * bh);
}
}
}
void vp9_quantize_sb64uv_32x32(MACROBLOCK *x) {
void vp9_quantize_sbuv_32x32(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
assert(bsize == BLOCK_SIZE_SB64X64);
vp9_regular_quantize_b_32x32(x, 256, 256);
vp9_regular_quantize_b_32x32(x, 320, 256);
}
void vp9_quantize_sb64uv_16x16(MACROBLOCK *x) {
void vp9_quantize_sbuv_16x16(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize);
const int bhl = mb_width_log2(bsize);
const int uoff = 16 << (bhl + bwl);
int i;
for (i = 256; i < 384; i += 16)
x->quantize_b_16x16(x, i, DCT_DCT, 256);
for (i = uoff; i < ((uoff * 3) >> 1); i += 16)
x->quantize_b_16x16(x, i, DCT_DCT, uoff);
}
void vp9_quantize_sb64uv_8x8(MACROBLOCK *x) {
void vp9_quantize_sbuv_8x8(MACROBLOCK *x, BLOCK_SIZE_TYPE bsize) {
const int bwl = mb_width_log2(bsize);
const int bhl = mb_width_log2(bsize);
const int uoff = 16 << (bhl + bwl);
int i;
for (i = 256; i < 384; i += 4)
x->quantize_b_8x8(x, i, DCT_DCT, 256);
for (i = uoff; i < ((uoff * 3) >> 1); i += 4)
x->quantize_b_8x8(x, i, DCT_DCT, uoff);
}