Commit 552d5cd7 authored by Geza Lore

Extend superblock size to 128x128 pixels.

If --enable-ext-partition is used at build time, the superblock size
(sometimes also referred to as coding unit (CU) size) is extended to
128x128 pixels.

Change-Id: Ie09cec6b7e8d765b7555ff5d80974aab60803f3a
parent cd1d01b9
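
Everything below is a mechanical consequence of one build-time constant. As a rough sketch of the wiring this commit implies (the exact defines are outside the hunks shown, so the values here are assumptions, not the committed code):

/* Hedged sketch, not the committed definitions: the superblock is
 * 128x128 pixels when configured with --enable-ext-partition
 * (CONFIG_EXT_PARTITION), and stays 64x64 otherwise. */
#if CONFIG_EXT_PARTITION
#define MAX_SB_SIZE_LOG2 7 /* 128x128 superblocks */
#else
#define MAX_SB_SIZE_LOG2 6 /* 64x64 superblocks */
#endif /* CONFIG_EXT_PARTITION */
#define MAX_SB_SIZE (1 << MAX_SB_SIZE_LOG2)
/* Mode info units are 8x8 pixels; a superblock spans this many of them. */
#define MI_BLOCK_SIZE (MAX_SB_SIZE >> 3)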
@@ -28,7 +28,7 @@
namespace {
-static const unsigned int kMaxDimension = MAX_CU_SIZE;
+static const unsigned int kMaxDimension = MAX_SB_SIZE;
typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
uint8_t *dst, ptrdiff_t dst_stride,
@@ -50,16 +50,16 @@ class MaskedSADTest : public ::testing::TestWithParam<MaskedSADParam> {
TEST_P(MaskedSADTest, OperationCheck) {
unsigned int ref_ret, ret;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
int err_count = 0;
int first_failure = -1;
-int src_stride = MAX_CU_SIZE;
-int ref_stride = MAX_CU_SIZE;
-int msk_stride = MAX_CU_SIZE;
+int src_stride = MAX_SB_SIZE;
+int ref_stride = MAX_SB_SIZE;
+int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
-for (int j = 0; j < MAX_CU_SIZE*MAX_CU_SIZE; j++) {
+for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand8();
ref_ptr[j] = rnd.Rand8();
msk_ptr[j] = ((rnd.Rand8()&0x7f) > 64) ? rnd.Rand8()&0x3f : 64;
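
The mask fill above draws weights in [0, 64]: a 7-bit draw above 64 yields a random 6-bit weight, anything else yields exactly 64. A hedged reference for what a masked SAD computes over such inputs (illustrative only, not the library's optimized kernels; the rounding step is an assumption):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Accumulate absolute differences scaled by a 6-bit mask weight. */
static unsigned int masked_sad_ref(const uint8_t *src, ptrdiff_t src_stride,
                                   const uint8_t *ref, ptrdiff_t ref_stride,
                                   const uint8_t *msk, ptrdiff_t msk_stride,
                                   int width, int height) {
  unsigned int sad = 0;
  for (int r = 0; r < height; ++r) {
    for (int c = 0; c < width; ++c) {
      const int diff = abs(src[r * src_stride + c] - ref[r * ref_stride + c]);
      sad += (msk[r * msk_stride + c] * diff + 32) >> 6; /* assumed rounding */
    }
  }
  return sad;
}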
@@ -108,18 +108,18 @@ class HighbdMaskedSADTest : public ::testing::
TEST_P(HighbdMaskedSADTest, OperationCheck) {
unsigned int ref_ret, ret;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
+DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
-int src_stride = MAX_CU_SIZE;
-int ref_stride = MAX_CU_SIZE;
-int msk_stride = MAX_CU_SIZE;
+int src_stride = MAX_SB_SIZE;
+int ref_stride = MAX_SB_SIZE;
+int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
-for (int j = 0; j < MAX_CU_SIZE*MAX_CU_SIZE; j++) {
+for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand16()&0xfff;
ref_ptr[j] = rnd.Rand16()&0xfff;
msk_ptr[j] = ((rnd.Rand8()&0x7f) > 64) ? rnd.Rand8()&0x3f : 64;
@@ -58,17 +58,17 @@ TEST_P(MaskedVarianceTest, OperationCheck) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
int err_count = 0;
int first_failure = -1;
-int src_stride = MAX_CU_SIZE;
-int ref_stride = MAX_CU_SIZE;
-int msk_stride = MAX_CU_SIZE;
+int src_stride = MAX_SB_SIZE;
+int ref_stride = MAX_SB_SIZE;
+int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
-for (int j = 0; j < MAX_CU_SIZE*MAX_CU_SIZE; j++) {
+for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand8();
ref_ptr[j] = rnd.Rand8();
msk_ptr[j] = rnd(65);
@@ -100,19 +100,19 @@ TEST_P(MaskedVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
int err_count = 0;
int first_failure = -1;
-int src_stride = MAX_CU_SIZE;
-int ref_stride = MAX_CU_SIZE;
-int msk_stride = MAX_CU_SIZE;
+int src_stride = MAX_SB_SIZE;
+int ref_stride = MAX_SB_SIZE;
+int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < 8; ++i) {
-memset(src_ptr, (i & 0x1) ? 255 : 0, MAX_CU_SIZE*MAX_CU_SIZE);
-memset(ref_ptr, (i & 0x2) ? 255 : 0, MAX_CU_SIZE*MAX_CU_SIZE);
-memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_CU_SIZE*MAX_CU_SIZE);
+memset(src_ptr, (i & 0x1) ? 255 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
+memset(ref_ptr, (i & 0x2) ? 255 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
+memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
ref_ret = ref_func_(src_ptr, src_stride,
ref_ptr, ref_stride,
@@ -166,21 +166,21 @@ TEST_P(MaskedSubPixelVarianceTest, OperationCheck) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
int err_count = 0;
int first_failure = -1;
-int src_stride = (MAX_CU_SIZE+1);
-int ref_stride = (MAX_CU_SIZE+1);
-int msk_stride = (MAX_CU_SIZE+1);
+int src_stride = (MAX_SB_SIZE+1);
+int ref_stride = (MAX_SB_SIZE+1);
+int msk_stride = (MAX_SB_SIZE+1);
int xoffset;
int yoffset;
for (int i = 0; i < number_of_iterations; ++i) {
int xoffsets[] = {0, 4, rnd(BIL_SUBPEL_SHIFTS)};
int yoffsets[] = {0, 4, rnd(BIL_SUBPEL_SHIFTS)};
-for (int j = 0; j < (MAX_CU_SIZE+1)*(MAX_CU_SIZE+1); j++) {
+for (int j = 0; j < (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1); j++) {
src_ptr[j] = rnd.Rand8();
ref_ptr[j] = rnd.Rand8();
msk_ptr[j] = rnd(65);
@@ -221,23 +221,23 @@ TEST_P(MaskedSubPixelVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
int first_failure_x = -1;
int first_failure_y = -1;
int err_count = 0;
int first_failure = -1;
-int src_stride = (MAX_CU_SIZE+1);
-int ref_stride = (MAX_CU_SIZE+1);
-int msk_stride = (MAX_CU_SIZE+1);
+int src_stride = (MAX_SB_SIZE+1);
+int ref_stride = (MAX_SB_SIZE+1);
+int msk_stride = (MAX_SB_SIZE+1);
for (int xoffset = 0 ; xoffset < BIL_SUBPEL_SHIFTS ; xoffset++) {
for (int yoffset = 0 ; yoffset < BIL_SUBPEL_SHIFTS ; yoffset++) {
for (int i = 0; i < 8; ++i) {
-memset(src_ptr, (i & 0x1) ? 255 : 0, (MAX_CU_SIZE+1)*(MAX_CU_SIZE+1));
-memset(ref_ptr, (i & 0x2) ? 255 : 0, (MAX_CU_SIZE+1)*(MAX_CU_SIZE+1));
-memset(msk_ptr, (i & 0x4) ? 64 : 0, (MAX_CU_SIZE+1)*(MAX_CU_SIZE+1));
+memset(src_ptr, (i & 0x1) ? 255 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
+memset(ref_ptr, (i & 0x2) ? 255 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
+memset(msk_ptr, (i & 0x4) ? 64 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
ref_ret = ref_func_(src_ptr, src_stride,
xoffset, yoffset,
@@ -297,19 +297,19 @@ TEST_P(HighbdMaskedVarianceTest, OperationCheck) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
+DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
-int src_stride = MAX_CU_SIZE;
-int ref_stride = MAX_CU_SIZE;
-int msk_stride = MAX_CU_SIZE;
+int src_stride = MAX_SB_SIZE;
+int ref_stride = MAX_SB_SIZE;
+int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < number_of_iterations; ++i) {
-for (int j = 0; j < MAX_CU_SIZE*MAX_CU_SIZE; j++) {
+for (int j = 0; j < MAX_SB_SIZE*MAX_SB_SIZE; j++) {
src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
msk_ptr[j] = rnd(65);
@@ -341,23 +341,23 @@ TEST_P(HighbdMaskedVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_CU_SIZE*MAX_CU_SIZE]);
+DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[MAX_SB_SIZE*MAX_SB_SIZE]);
uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
-int src_stride = MAX_CU_SIZE;
-int ref_stride = MAX_CU_SIZE;
-int msk_stride = MAX_CU_SIZE;
+int src_stride = MAX_SB_SIZE;
+int ref_stride = MAX_SB_SIZE;
+int msk_stride = MAX_SB_SIZE;
for (int i = 0; i < 8; ++i) {
vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
-MAX_CU_SIZE*MAX_CU_SIZE);
+MAX_SB_SIZE*MAX_SB_SIZE);
vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
-MAX_CU_SIZE*MAX_CU_SIZE);
-memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_CU_SIZE*MAX_CU_SIZE);
+MAX_SB_SIZE*MAX_SB_SIZE);
+memset(msk_ptr, (i & 0x4) ? 64 : 0, MAX_SB_SIZE*MAX_SB_SIZE);
ref_ret = ref_func_(src8_ptr, src_stride,
ref8_ptr, ref_stride,
@@ -407,24 +407,24 @@ TEST_P(HighbdMaskedSubPixelVarianceTest, OperationCheck) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
+DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int err_count = 0;
int first_failure = -1;
int first_failure_x = -1;
int first_failure_y = -1;
-int src_stride = (MAX_CU_SIZE+1);
-int ref_stride = (MAX_CU_SIZE+1);
-int msk_stride = (MAX_CU_SIZE+1);
+int src_stride = (MAX_SB_SIZE+1);
+int ref_stride = (MAX_SB_SIZE+1);
+int msk_stride = (MAX_SB_SIZE+1);
int xoffset, yoffset;
for (int i = 0; i < number_of_iterations; ++i) {
for (xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
for (yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
-for (int j = 0; j < (MAX_CU_SIZE+1)*(MAX_CU_SIZE+1); j++) {
+for (int j = 0; j < (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1); j++) {
src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
msk_ptr[j] = rnd(65);
@@ -465,27 +465,27 @@ TEST_P(HighbdMaskedSubPixelVarianceTest, ExtremeValues) {
unsigned int ref_ret, opt_ret;
unsigned int ref_sse, opt_sse;
ACMRandom rnd(ACMRandom::DeterministicSeed());
-DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
-DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1)]);
+DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
+DECLARE_ALIGNED(16, uint8_t, msk_ptr[(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1)]);
uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
int first_failure_x = -1;
int first_failure_y = -1;
int err_count = 0;
int first_failure = -1;
-int src_stride = (MAX_CU_SIZE+1);
-int ref_stride = (MAX_CU_SIZE+1);
-int msk_stride = (MAX_CU_SIZE+1);
+int src_stride = (MAX_SB_SIZE+1);
+int ref_stride = (MAX_SB_SIZE+1);
+int msk_stride = (MAX_SB_SIZE+1);
for (int xoffset = 0 ; xoffset < BIL_SUBPEL_SHIFTS ; xoffset++) {
for (int yoffset = 0 ; yoffset < BIL_SUBPEL_SHIFTS ; yoffset++) {
for (int i = 0; i < 8; ++i) {
vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
-(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1));
+(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
-(MAX_CU_SIZE+1)*(MAX_CU_SIZE+1));
-memset(msk_ptr, (i & 0x4) ? 64 : 0, (MAX_CU_SIZE+1)*(MAX_CU_SIZE+1));
+(MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
+memset(msk_ptr, (i & 0x4) ? 64 : 0, (MAX_SB_SIZE+1)*(MAX_SB_SIZE+1));
ref_ret = ref_func_(src8_ptr, src_stride,
xoffset, yoffset,
@@ -10,13 +10,16 @@
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vp9_rtcd.h"
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
+#if CONFIG_VP10
+#include "vp10/common/blockd.h"
+#elif CONFIG_VP9
#include "vp9/common/vp9_blockd.h"
+#endif
#include "vpx_mem/vpx_mem.h"
typedef void (*SubtractFunc)(int rows, int cols,
@@ -24,7 +27,7 @@ typedef void (*SubtractFunc)(int rows, int cols,
const uint8_t *src_ptr, ptrdiff_t src_stride,
const uint8_t *pred_ptr, ptrdiff_t pred_stride);
-namespace vp9 {
+namespace {
class VP9SubtractBlockTest : public ::testing::TestWithParam<SubtractFunc> {
public:
@@ -105,5 +108,4 @@ INSTANTIATE_TEST_CASE_P(NEON, VP9SubtractBlockTest,
INSTANTIATE_TEST_CASE_P(MSA, VP9SubtractBlockTest,
::testing::Values(vpx_subtract_block_msa));
#endif
-} // namespace vp9
+} // namespace
@@ -147,7 +147,7 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += fdct8x8_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += variance_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_error_block_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_quantize_test.cc
-LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += vp9_subtract_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP9_ENCODER) += subtract_test.cc
ifeq ($(CONFIG_VP9_ENCODER),yes)
LIBVPX_TEST_SRCS-$(CONFIG_SPATIAL_SVC) += svc_test.cc
@@ -172,6 +172,7 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_fht16x16_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_ANS) += vp10_ans_test.cc
LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += sum_squares_test.cc
+LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += subtract_test.cc
ifeq ($(CONFIG_EXT_INTER),yes)
LIBVPX_TEST_SRCS-$(HAVE_SSSE3) += masked_variance_test.cc
@@ -44,9 +44,6 @@ typedef enum {
#define IsInterpolatingFilter(filter) (1)
#endif // CONFIG_EXT_INTERP && SUPPORT_NONINTERPOLATING_FILTERS
-#define MAXTXLEN 32
-#define CU_SIZE 64
static INLINE int is_inter_mode(PREDICTION_MODE mode) {
#if CONFIG_EXT_INTER
return mode >= NEARESTMV && mode <= NEW_NEWMV;
@@ -167,8 +164,8 @@ typedef struct {
PREDICTION_MODE mode;
TX_SIZE tx_size;
#if CONFIG_VAR_TX
-// TODO(jingning): This effectively assigned an entry for each 8x8 block.
-// Apparently it takes much more space than needed.
+// TODO(jingning): This effectively assigned a separate entry for each
+// 8x8 block. Apparently it takes much more space than needed.
TX_SIZE inter_tx_size[MI_BLOCK_SIZE][MI_BLOCK_SIZE];
#endif
int8_t skip;
@@ -318,15 +315,15 @@ typedef struct macroblockd {
const YV12_BUFFER_CONFIG *cur_buf;
ENTROPY_CONTEXT *above_context[MAX_MB_PLANE];
-ENTROPY_CONTEXT left_context[MAX_MB_PLANE][16];
+ENTROPY_CONTEXT left_context[MAX_MB_PLANE][2 * MI_BLOCK_SIZE];
PARTITION_CONTEXT *above_seg_context;
-PARTITION_CONTEXT left_seg_context[8];
+PARTITION_CONTEXT left_seg_context[MI_BLOCK_SIZE];
#if CONFIG_VAR_TX
TXFM_CONTEXT *above_txfm_context;
TXFM_CONTEXT *left_txfm_context;
-TXFM_CONTEXT left_txfm_context_buffer[8];
+TXFM_CONTEXT left_txfm_context_buffer[MI_BLOCK_SIZE];
TX_SIZE max_tx_size;
#if CONFIG_SUPERTX
@@ -686,6 +683,7 @@ void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
#if CONFIG_EXT_INTER
static INLINE int is_interintra_allowed_bsize(const BLOCK_SIZE bsize) {
+// TODO(debargha): Should this be bsize < BLOCK_LARGEST?
return (bsize >= BLOCK_8X8) && (bsize < BLOCK_64X64);
}
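
The resized left-edge buffers in the macroblockd hunk above follow from the unit sizes: entropy contexts track 4x4 columns, while segmentation and transform contexts track 8x8 mode-info columns, so each array needs enough entries for one superblock edge. A hedged arithmetic check, under the assumed values MAX_SB_SIZE = 128 and MI_BLOCK_SIZE = MAX_SB_SIZE / 8 (not library code):

#include <assert.h>

#define MAX_SB_SIZE 128                 /* assumed CONFIG_EXT_PARTITION value */
#define MI_BLOCK_SIZE (MAX_SB_SIZE / 8) /* superblock edge in 8x8 mi units */

int main(void) {
  /* left_context: one entry per 4x4 unit along the superblock edge. */
  assert(2 * MI_BLOCK_SIZE == MAX_SB_SIZE / 4);
  /* left_seg_context / left_txfm_context_buffer: one entry per mi unit;
   * 16 replaces the old hard-coded 8 once the superblock doubles. */
  assert(MI_BLOCK_SIZE == 16);
  return 0;
}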
@@ -19,154 +19,282 @@
extern "C" {
#endif
+#if CONFIG_EXT_PARTITION
+# define IF_EXT_PARTITION(...) __VA_ARGS__
+#else
+# define IF_EXT_PARTITION(...)
+#endif
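
This variadic macro is what lets every lookup table below gain three trailing entries (64x128, 128x64, 128x128) only when extended partitions are compiled in; with the flag off, the arguments expand to nothing, and the trailing comma left before the closing brace is legal C. An illustrative expansion (the demo name is ours, and BLOCK_SIZES is assumed to shrink in step so the arrays stay exactly full):

/* With CONFIG_EXT_PARTITION:    {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5} */
/* Without CONFIG_EXT_PARTITION: {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4} */
static const unsigned char demo[] =
    {0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, IF_EXT_PARTITION(4, 5, 5)};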
// Log 2 conversion lookup tables for block width and height
static const uint8_t b_width_log2_lookup[BLOCK_SIZES] =
-{0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4};
+{0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, IF_EXT_PARTITION(4, 5, 5)};
static const uint8_t b_height_log2_lookup[BLOCK_SIZES] =
-{0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4};
-static const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] =
-{1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16};
-static const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] =
-{1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16};
+{0, 1, 0, 1, 2, 1, 2, 3, 2, 3, 4, 3, 4, IF_EXT_PARTITION(5, 4, 5)};
// Log 2 conversion lookup tables for modeinfo width and height
static const uint8_t mi_width_log2_lookup[BLOCK_SIZES] =
-{0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3};
+{0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, IF_EXT_PARTITION(3, 4, 4)};
static const uint8_t mi_height_log2_lookup[BLOCK_SIZES] =
-{0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3};
+{0, 0, 0, 0, 1, 0, 1, 2, 1, 2, 3, 2, 3, IF_EXT_PARTITION(4, 3, 4)};
+// Width/height lookup tables in units of various block sizes
+static const uint8_t num_4x4_blocks_wide_lookup[BLOCK_SIZES] =
+{1, 1, 2, 2, 2, 4, 4, 4, 8, 8, 8, 16, 16, IF_EXT_PARTITION(16, 32, 32)};
+static const uint8_t num_4x4_blocks_high_lookup[BLOCK_SIZES] =
+{1, 2, 1, 2, 4, 2, 4, 8, 4, 8, 16, 8, 16, IF_EXT_PARTITION(32, 16, 32)};
static const uint8_t num_8x8_blocks_wide_lookup[BLOCK_SIZES] =
-{1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8};
+{1, 1, 1, 1, 1, 2, 2, 2, 4, 4, 4, 8, 8, IF_EXT_PARTITION(8, 16, 16)};
static const uint8_t num_8x8_blocks_high_lookup[BLOCK_SIZES] =
-{1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8};
+{1, 1, 1, 1, 2, 1, 2, 4, 2, 4, 8, 4, 8, IF_EXT_PARTITION(16, 8, 16)};
+static const uint8_t num_16x16_blocks_wide_lookup[BLOCK_SIZES] =
+{1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 4, 4, IF_EXT_PARTITION(4, 8, 8)};
+static const uint8_t num_16x16_blocks_high_lookup[BLOCK_SIZES] =
+{1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 4, 2, 4, IF_EXT_PARTITION(8, 4, 8)};
// VPXMIN(3, VPXMIN(b_width_log2(bsize), b_height_log2(bsize)))
static const uint8_t size_group_lookup[BLOCK_SIZES] =
-{0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3};
+{0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, IF_EXT_PARTITION(3, 3, 3)};
static const uint8_t num_pels_log2_lookup[BLOCK_SIZES] =
-{4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12};
+{4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, IF_EXT_PARTITION(13, 13, 14)};
-static const PARTITION_TYPE partition_lookup[][BLOCK_SIZES] = {
-{ // 4X4
-// 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
-PARTITION_NONE, PARTITION_INVALID, PARTITION_INVALID,
+static const PARTITION_TYPE
+partition_lookup[MAX_SB_SIZE_LOG2 - 1][BLOCK_SIZES] = {
+{ // 4X4 ->
+// 4X4
+PARTITION_NONE,
+// 4X8, 8X4, 8X8
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+// 8X16, 16X8, 16X16
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+// 16X32, 32X16, 32X32
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+// 32X64, 64X32, 64X64
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+#if CONFIG_EXT_PARTITION
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+#endif // CONFIG_EXT_PARTITION
+}, { // 8X8 ->
+// 4X4
+PARTITION_SPLIT,
+// 4X8, 8X4, 8X8
+PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+// 8X16, 16X8, 16X16
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+// 16X32, 32X16, 32X32
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+// 32X64, 64X32, 64X64
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+#if CONFIG_EXT_PARTITION
+// 64x128, 128x64, 128x128
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+#endif // CONFIG_EXT_PARTITION
+}, { // 16X16 ->
+// 4X4
+PARTITION_SPLIT,
+// 4X8, 8X4, 8X8
PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 8X16, 16X8, 16X16
+PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+// 16X32, 32X16, 32X32
PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-PARTITION_INVALID
-}, { // 8X8
-// 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
-PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+// 32X64, 64X32, 64X64
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+#if CONFIG_EXT_PARTITION
+// 64x128, 128x64, 128x128
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID
-}, { // 16X16
-// 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
-PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
-PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID,
+#endif // CONFIG_EXT_PARTITION
+}, { // 32X32 ->
+// 4X4
+PARTITION_SPLIT,
+// 4X8, 8X4, 8X8
PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 8X16, 16X8, 16X16
PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 16X32, 32X16, 32X32
+PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+// 32X64, 64X32, 64X64
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
-PARTITION_INVALID, PARTITION_INVALID
-}, { // 32X32
-// 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
-PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
-PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT,
-PARTITION_HORZ, PARTITION_NONE, PARTITION_INVALID,
-PARTITION_INVALID, PARTITION_INVALID
-}, { // 64X64
-// 4X4, 4X8,8X4,8X8,8X16,16X8,16X16,16X32,32X16,32X32,32X64,64X32,64X64
-PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
-PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
-PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_VERT, PARTITION_HORZ,
-PARTITION_NONE
+#if CONFIG_EXT_PARTITION
+// 64x128, 128x64, 128x128
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+#endif // CONFIG_EXT_PARTITION
+}, { // 64X64 ->
+// 4X4
+PARTITION_SPLIT,
+// 4X8, 8X4, 8X8
PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 8X16, 16X8, 16X16
PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 16X32, 32X16, 32X32
PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 32X64, 64X32, 64X64
+PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+#if CONFIG_EXT_PARTITION
+// 64x128, 128x64, 128x128
+PARTITION_INVALID, PARTITION_INVALID, PARTITION_INVALID,
+}, { // 128x128 ->
+// 4X4
+PARTITION_SPLIT,
+// 4X8, 8X4, 8X8
+PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 8X16, 16X8, 16X16
+PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 16X32, 32X16, 32X32
+PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 32X64, 64X32, 64X64
+PARTITION_SPLIT, PARTITION_SPLIT, PARTITION_SPLIT,
+// 64x128, 128x64, 128x128
+PARTITION_VERT, PARTITION_HORZ, PARTITION_NONE,
+#endif // CONFIG_EXT_PARTITION
}
};
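
For reference, a hedged sketch of how a table shaped like this is typically consumed (the helper name is ours, not part of this change): the row index is the parent square block's width log2, which runs 0..5 for 4X4 through 128X128 and so needs exactly MAX_SB_SIZE_LOG2 - 1 rows.

/* Hedged usage sketch: recover the partition that carves parent 'bsize'
 * into 'subsize'. Assumes bsize is square, as in the rows above. */
static INLINE PARTITION_TYPE get_partition(BLOCK_SIZE bsize,
                                           BLOCK_SIZE subsize) {
  return partition_lookup[b_width_log2_lookup[bsize]][subsize];
}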
#if CONFIG_EXT_PARTITION_TYPES
-static const BLOCK_SIZE subsize_lookup[EXT_PARTITION_TYPES][BLOCK_SIZES] = {
+static const BLOCK_SIZE subsize_lookup[EXT_PARTITION_TYPES][BLOCK_SIZES] =
#else
+static const BLOCK_SIZE subsize_lookup[PARTITION_TYPES][BLOCK_SIZES] =
+#endif // CONFIG_EXT_PARTITION_TYPES
+{