Commit 9739f365 authored by David Barker

Fix clpf + ext_partition and dering + ext_partition

In both cases, the data to be signalled (for clpf/dering) is
stored at the topmost superblock level, but previously this data
would not be signalled for 128x128 superblocks.

For dering, it was also assumed that all superblocks were of the
maximum possible size. When ext_partition was enabled and we
chose to use a 64x64 superblock, this resulted in an out-of-bounds
access to cm->mi_grid_visible, leading to a segfault.

Change-Id: I81b9c037ed8db8be16d7e4cf6041e1df7525cc97
parent 3827fddd
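For illustration only (not part of this commit), here is a minimal standalone C sketch of the clamping pattern the dering hunks below adopt: the per-superblock scan is bounded by the actual superblock size rather than by MAX_MIB_SIZE, so a 64x64 superblock chosen under ext_partition never indexes a row-major mi grid beyond its own extent. The frame sizes, the 8x8-pixel mi unit, the grid, and the helper name are all assumptions made for the sketch, not libaom code.

#include <stdio.h>

/* Hypothetical sizes, in mi (mode-info) units; an 8x8-pixel mi unit is
 * assumed throughout this sketch. */
#define MI_ROWS 40       /* frame height in mi units              */
#define MI_COLS 40       /* frame width in mi units               */
#define MAX_MIB_SIZE 16  /* 128x128 superblock at 8x8-px mi units */

static int grid[MI_ROWS * MI_COLS]; /* stand-in for cm->mi_grid_visible */

/* Visit every mi unit of the superblock whose top-left mi is
 * (mi_row, mi_col).  mib_size is the superblock's *actual* size in mi
 * units (8 for 64x64, 16 for 128x128 under the assumption above);
 * clamping to it, and to the frame edge, keeps every grid index inside
 * both the superblock and the frame. */
static int visit_sb(int mi_row, int mi_col, int mib_size) {
  int maxr = MI_ROWS - mi_row;
  int maxc = MI_COLS - mi_col;
  if (maxr > mib_size) maxr = mib_size;
  if (maxc > mib_size) maxc = mib_size;
  int sum = 0;
  for (int r = 0; r < maxr; r++)
    for (int c = 0; c < maxc; c++)
      sum += grid[(mi_row + r) * MI_COLS + (mi_col + c)];
  return sum;
}

int main(void) {
  /* A 64x64 superblock (8 mi units here): passing its real size keeps
   * the scan inside that superblock instead of a MAX_MIB_SIZE region. */
  printf("%d\n", visit_sb(8, 8, 8));
  return 0;
}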
@@ -33,8 +33,14 @@ int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col) {
int skip = 1;
maxc = cm->mi_cols - mi_col;
maxr = cm->mi_rows - mi_row;
#if CONFIG_EXT_PARTITION
if (maxr > cm->mib_size) maxr = cm->mib_size;
if (maxc > cm->mib_size) maxc = cm->mib_size;
#else
if (maxr > MAX_MIB_SIZE) maxr = MAX_MIB_SIZE;
if (maxc > MAX_MIB_SIZE) maxc = MAX_MIB_SIZE;
#endif
for (r = 0; r < maxr; r++) {
for (c = 0; c < maxc; c++) {
skip = skip &&
@@ -54,8 +60,13 @@ int sb_compute_dering_list(const AV1_COMMON *const cm, int mi_row, int mi_col,
grid = cm->mi_grid_visible;
maxc = cm->mi_cols - mi_col;
maxr = cm->mi_rows - mi_row;
#if CONFIG_EXT_PARTITION
if (maxr > cm->mib_size) maxr = cm->mib_size;
if (maxc > cm->mib_size) maxc = cm->mib_size;
#else
if (maxr > MAX_MIB_SIZE) maxr = MAX_MIB_SIZE;
if (maxc > MAX_MIB_SIZE) maxc = MAX_MIB_SIZE;
#endif
for (r = 0; r < maxr; r++) {
MODE_INFO **grid_row;
grid_row = &grid[(mi_row + r) * cm->mi_stride + mi_col];
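Both the decoder and encoder hunks that follow read and write the dering gain through the superblock's top-left mode-info entry, which is what the commit message means by data "stored at the topmost superblock level". Below is a minimal standalone sketch of that pattern, assuming a hypothetical row-major grid with one entry per mi unit; the names and sizes are not libaom's.

#include <stdio.h>

/* Hypothetical frame of 48x48 mi units, stored row-major. */
#define MI_ROWS 48
#define MI_COLS 48
#define MI_STRIDE MI_COLS

typedef struct { int dering_gain; } MbInfo; /* stand-in per-mi struct */
static MbInfo grid[MI_ROWS * MI_STRIDE];

/* Per-superblock data lives in the superblock's top-left mi unit, so
 * encoder and decoder address it with the same single index. */
static void set_sb_gain(int mi_row, int mi_col, int gain) {
  grid[mi_row * MI_STRIDE + mi_col].dering_gain = gain;
}
static int get_sb_gain(int mi_row, int mi_col) {
  return grid[mi_row * MI_STRIDE + mi_col].dering_gain;
}

int main(void) {
  /* Walk the frame in steps of the actual superblock size (here 8 mi
   * units, i.e. a 64x64 superblock under an assumed 8x8-px mi unit)
   * and tag each superblock at its top-left corner. */
  const int mib_size = 8;
  for (int r = 0; r < MI_ROWS; r += mib_size)
    for (int c = 0; c < MI_COLS; c += mib_size) set_sb_gain(r, c, 3);
  printf("%d\n", get_sb_gain(16, 32));
  return 0;
}

Because the value lives at the superblock root, it must be signalled at whatever block size the superblock actually is: BLOCK_128X128 when ext_partition selects 128x128 superblocks, BLOCK_64X64 otherwise. That is the condition the hunks below add.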
@@ -1981,7 +1981,19 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_DERING
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128 && bsize == BLOCK_128X128) {
if (cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
aom_read_literal(r, DERING_REFINEMENT_BITS, ACCT_STR);
} else {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
0;
}
} else if (cm->sb_size == BLOCK_64X64 && bsize == BLOCK_64X64) {
#else
if (bsize == BLOCK_64X64) {
#endif
if (cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain =
aom_read_literal(r, DERING_REFINEMENT_BITS, ACCT_STR);
@@ -1993,8 +2005,51 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
#endif
#if CONFIG_CLPF
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128 && bsize == BLOCK_128X128 &&
cm->clpf_strength_y && cm->clpf_size != CLPF_NOSIZE) {
const int tl = mi_row * MI_SIZE / MIN_FB_SIZE * cm->clpf_stride +
mi_col * MI_SIZE / MIN_FB_SIZE;
if (cm->clpf_size == CLPF_128X128) {
cm->clpf_blocks[tl] = aom_read_literal(r, 1, ACCT_STR);
} else if (cm->clpf_size == CLPF_64X64) {
const int tr = tl + 2;
const int bl = tl + 2 * cm->clpf_stride;
const int br = tr + 2 * cm->clpf_stride;
const int size = 64 / MI_SIZE;
// Up to four bits per SB
if (!clpf_all_skip(cm, mi_col, mi_row, size))
cm->clpf_blocks[tl] = aom_read_literal(r, 1, ACCT_STR);
if (mi_col + size < cm->mi_cols &&
!clpf_all_skip(cm, mi_col + size, mi_row, size))
cm->clpf_blocks[tr] = aom_read_literal(r, 1, ACCT_STR);
if (mi_row + size < cm->mi_rows &&
!clpf_all_skip(cm, mi_col, mi_row + size, size))
cm->clpf_blocks[bl] = aom_read_literal(r, 1, ACCT_STR);
if (mi_col + size < cm->mi_cols && mi_row + size < cm->mi_rows &&
!clpf_all_skip(cm, mi_col + size, mi_row + size, size))
cm->clpf_blocks[br] = aom_read_literal(r, 1, ACCT_STR);
} else if (cm->clpf_size == CLPF_32X32) {
int i, j;
const int size = 32 / MI_SIZE;
for (i = 0; i < 4; ++i)
for (j = 0; j < 4; ++j) {
const int index = tl + i * cm->clpf_stride + j;
if (mi_row + i * size < cm->mi_rows &&
mi_col + j * size < cm->mi_cols &&
!clpf_all_skip(cm, mi_col + j * size, mi_row + i * size, size))
cm->clpf_blocks[index] = aom_read_literal(r, 1, ACCT_STR);
}
}
} else if (cm->sb_size == BLOCK_64X64 && bsize == BLOCK_64X64 &&
#else
if (bsize == BLOCK_64X64 &&
#endif // CONFIG_EXT_PARTITION
cm->clpf_strength_y && cm->clpf_size != CLPF_NOSIZE) {
const int tl = mi_row * MI_SIZE / MIN_FB_SIZE * cm->clpf_stride +
mi_col * MI_SIZE / MIN_FB_SIZE;
@@ -2027,7 +2082,7 @@ static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
cm->clpf_blocks[br] = aom_read_literal(r, 1, ACCT_STR);
}
}
#endif // CONFIG_CLPF
}
#if !CONFIG_ANS
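The CLPF hunks above and below index a per-frame grid of filter flags whose unit is MIN_FB_SIZE; the tl + 2 and 32 / MI_SIZE arithmetic implies one flag per 32x32 pixels. The standalone sketch below (hypothetical stride and positions, not libaom code) just reproduces the indices a 128x128 superblock touches at each signalling granularity.

#include <stdio.h>

/* One flag per 32x32-pixel filter block, stored row-major. */
#define FB_SIZE 32
#define CLPF_STRIDE 8   /* e.g. a 256-pixel-wide frame (assumption) */

/* Print the flag indices covered by the 128x128 superblock whose
 * top-left pixel is (y, x), mirroring the tl/tr/bl/br and 4x4-loop
 * arithmetic in the decoder hunk above. */
static void show_indices(int y, int x) {
  const int tl = (y / FB_SIZE) * CLPF_STRIDE + x / FB_SIZE;
  printf("CLPF_128X128: %d\n", tl);              /* one flag per SB    */
  printf("CLPF_64X64:   %d %d %d %d\n",          /* the four quadrants */
         tl, tl + 2, tl + 2 * CLPF_STRIDE, tl + 2 * CLPF_STRIDE + 2);
  printf("CLPF_32X32:  ");                       /* 4x4 block of flags */
  for (int i = 0; i < 4; i++)
    for (int j = 0; j < 4; j++) printf(" %d", tl + i * CLPF_STRIDE + j);
  printf("\n");
}

int main(void) {
  show_indices(128, 128); /* superblock with top-left pixel at (128, 128) */
  return 0;
}

The mi_row + i * size < cm->mi_rows style guards in the real code additionally skip flags for filter blocks that fall outside the frame.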
@@ -2269,8 +2269,18 @@ static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
#endif // CONFIG_EXT_PARTITION_TYPES
#if CONFIG_DERING
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128 && bsize == BLOCK_128X128 &&
cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
aom_write_literal(
w,
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain,
DERING_REFINEMENT_BITS);
} else if (cm->sb_size == BLOCK_64X64 && bsize == BLOCK_64X64 &&
#else
if (bsize == BLOCK_64X64 &&
#endif // CONFIG_EXT_PARTITION
cm->dering_level != 0 && !sb_all_skip(cm, mi_row, mi_col)) {
aom_write_literal(
w,
cm->mi_grid_visible[mi_row * cm->mi_stride + mi_col]->mbmi.dering_gain,
@@ -2279,8 +2289,50 @@ static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
#endif
#if CONFIG_CLPF
#if CONFIG_EXT_PARTITION
if (cm->sb_size == BLOCK_128X128 && bsize == BLOCK_128X128 &&
cm->clpf_blocks && cm->clpf_strength_y && cm->clpf_size != CLPF_NOSIZE) {
const int tl = mi_row * MI_SIZE / MIN_FB_SIZE * cm->clpf_stride +
mi_col * MI_SIZE / MIN_FB_SIZE;
if (cm->clpf_size == CLPF_128X128 && cm->clpf_blocks[tl] != CLPF_NOFLAG) {
aom_write_literal(w, cm->clpf_blocks[tl], 1);
} else if (cm->clpf_size == CLPF_64X64) {
const int tr = tl + 2;
const int bl = tl + 2 * cm->clpf_stride;
const int br = tr + 2 * cm->clpf_stride;
// Up to four bits per SB.
if (cm->clpf_blocks[tl] != CLPF_NOFLAG)
aom_write_literal(w, cm->clpf_blocks[tl], 1);
if (mi_col + MI_SIZE < cm->mi_cols && cm->clpf_blocks[tr] != CLPF_NOFLAG)
aom_write_literal(w, cm->clpf_blocks[tr], 1);
if (mi_row + MI_SIZE < cm->mi_rows && cm->clpf_blocks[bl] != CLPF_NOFLAG)
aom_write_literal(w, cm->clpf_blocks[bl], 1);
if (mi_row + MI_SIZE < cm->mi_rows && mi_col + MI_SIZE < cm->mi_cols &&
cm->clpf_blocks[br] != CLPF_NOFLAG)
aom_write_literal(w, cm->clpf_blocks[br], 1);
} else if (cm->clpf_size == CLPF_32X32) {
int i, j;
const int size = 32 / MI_SIZE;
// Up to sixteen bits per SB.
for (i = 0; i < 4; ++i)
for (j = 0; j < 4; ++j) {
const int index = tl + i * cm->clpf_stride + j;
if (mi_row + i * size < cm->mi_rows &&
mi_col + j * size < cm->mi_cols &&
cm->clpf_blocks[index] != CLPF_NOFLAG)
aom_write_literal(w, cm->clpf_blocks[index], 1);
}
}
} else if (cm->sb_size == BLOCK_64X64 && bsize == BLOCK_64X64 &&
#else
if (bsize == BLOCK_64X64 &&
#endif // CONFIG_EXT_PARTITION
cm->clpf_blocks && cm->clpf_strength_y &&
cm->clpf_size != CLPF_NOSIZE) {
const int tl = mi_row * MI_SIZE / MIN_FB_SIZE * cm->clpf_stride +
mi_col * MI_SIZE / MIN_FB_SIZE;
const int tr = tl + 1;
@@ -2307,7 +2359,7 @@ static void write_modes_sb(AV1_COMP *const cpi, const TileInfo *const tile,
cm->clpf_blocks[br] != CLPF_NOFLAG)
aom_write_literal(w, cm->clpf_blocks[br], 1);
}
#endif // CONFIG_CLPF
}
static void write_modes(AV1_COMP *const cpi, const TileInfo *const tile,