Commit 9ce3a7d7 authored by Hangyu Kuang, committed by hkuang

Implement frame parallel decode for VP9.

With 4 threads, frame parallel decode is ~3x faster than single-thread
decode and around 30% faster than tile parallel decode for frame-parallel
encoded video, on both Android and desktop. Decode speed also scales with
the number of threads, so decoding could be even faster with more threads
(a decoder-setup sketch follows the commit header below).

Change-Id: Ia0a549aaa3e83b5a17b31d8299aa496ea4f21e3e
parent 4d0d7842
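Not part of this change, but as context for the numbers above: a minimal
sketch of how an application could opt into frame parallel decoding with 4
threads, assuming the standard vpx_codec_dec_init() API and that the
VPX_CODEC_USE_FRAME_THREADING init-time flag is available in
vpx/vpx_decoder.h (the helper name below is hypothetical).

#include "vpx/vp8dx.h"
#include "vpx/vpx_decoder.h"

// Sketch: create a VP9 decoder using 4 threads with frame-based threading.
static vpx_codec_err_t open_frame_parallel_decoder(vpx_codec_ctx_t *decoder) {
  vpx_codec_dec_cfg_t cfg = {0};
  cfg.threads = 4;  // one frame worker per thread
  return vpx_codec_dec_init(decoder, vpx_codec_vp9_dx(), &cfg,
                            VPX_CODEC_USE_FRAME_THREADING);
}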
......@@ -12,11 +12,37 @@
#include "vpx_mem/vpx_mem.h"
#include "vp9/common/vp9_blockd.h"
#include "vp9/common/vp9_common.h"
#include "vp9/common/vp9_entropymode.h"
#include "vp9/common/vp9_entropymv.h"
#include "vp9/common/vp9_onyxc_int.h"
#include "vp9/common/vp9_systemdependent.h"
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
void lock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
pthread_mutex_lock(&pool->pool_mutex);
#else
(void)pool;
#endif
}
void unlock_buffer_pool(BufferPool *const pool) {
#if CONFIG_MULTITHREAD
pthread_mutex_unlock(&pool->pool_mutex);
#else
(void)pool;
#endif
}
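// Illustrative sketch only, not part of this change: the atomic reference
// count mentioned in the TODO above could make the common ref/unref path
// lock-free, e.g. with GCC's atomic builtins (the names below are
// hypothetical):
//
//   static INLINE void ref_frame_buffer(RefCntBuffer *const buf) {
//     __sync_fetch_and_add(&buf->ref_count, 1);
//   }
//   static INLINE int unref_frame_buffer(RefCntBuffer *const buf) {
//     // Returns the remaining count; 0 means the buffer can be released.
//     return __sync_sub_and_fetch(&buf->ref_count, 1);
//   }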
static INLINE void alloc_mi_array(VP9_COMMON *cm, int mi_size, int idx) {
CHECK_MEM_ERROR(cm, cm->mip_array[idx],
vpx_calloc(mi_size, sizeof(*cm->mip_array[0])));
CHECK_MEM_ERROR(cm, cm->mi_grid_base_array[idx],
vpx_calloc(mi_size, sizeof(*cm->mi_grid_base_array[0])));
}
static void clear_mi_border(const VP9_COMMON *cm, MODE_INFO *mi) {
int i;
......@@ -49,40 +75,47 @@ static void setup_mi(VP9_COMMON *cm) {
vpx_memset(cm->mi_grid_base, 0, cm->mi_stride * (cm->mi_rows + 1) *
sizeof(*cm->mi_grid_base));
clear_mi_border(cm, cm->prev_mip);
// Only clear the mi border in non-frame-parallel decode. In frame-parallel
// decode, prev_mip is managed by the previous decoding thread, while in
// non-frame-parallel decode both prev_mip and mip are managed by the
// current decoding thread.
if (!cm->frame_parallel_decode)
clear_mi_border(cm, cm->prev_mip);
}
static int alloc_mi(VP9_COMMON *cm, int mi_size) {
int i;
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
cm->mip_array[i] =
(MODE_INFO *)vpx_calloc(mi_size, sizeof(*cm->mip));
if (cm->mip_array[i] == NULL)
return 1;
cm->mi_grid_base_array[i] =
(MODE_INFO **)vpx_calloc(mi_size, sizeof(*cm->mi_grid_base));
if (cm->mi_grid_base_array[i] == NULL)
return 1;
// Delay reallocation as another thread is accessing prev_mi.
if (cm->frame_parallel_decode && i == cm->prev_mi_idx) {
cm->update_prev_mi = 1;
continue;
}
alloc_mi_array(cm, mi_size, i);
}
// Init the index.
cm->mi_idx = 0;
cm->prev_mi_idx = 1;
cm->mip = cm->mip_array[cm->mi_idx];
cm->prev_mip = cm->mip_array[cm->prev_mi_idx];
cm->mi_grid_base = cm->mi_grid_base_array[cm->mi_idx];
cm->prev_mi_grid_base = cm->mi_grid_base_array[cm->prev_mi_idx];
if (!cm->frame_parallel_decode) {
cm->mi_idx = 0;
cm->prev_mi_idx = 1;
// In frame-parallel decode, prev_mip comes from another thread, so the
// current decoding thread should not touch it.
cm->prev_mip = cm->mip_array[cm->prev_mi_idx];
cm->prev_mi_grid_base = cm->mi_grid_base_array[cm->prev_mi_idx];
}
return 0;
}
static void free_mi(VP9_COMMON *cm) {
static void free_mi(VP9_COMMON *cm, int decode_done) {
int i;
for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
if (cm->frame_parallel_decode && i == cm->prev_mi_idx && !decode_done)
continue;
vpx_free(cm->mip_array[i]);
cm->mip_array[i] = NULL;
vpx_free(cm->mi_grid_base_array[i]);
......@@ -90,9 +123,12 @@ static void free_mi(VP9_COMMON *cm) {
}
cm->mip = NULL;
cm->prev_mip = NULL;
cm->mi_grid_base = NULL;
cm->prev_mi_grid_base = NULL;
if (!cm->frame_parallel_decode) {
cm->prev_mip = NULL;
cm->prev_mi_grid_base = NULL;
}
}
static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) {
......@@ -109,7 +145,10 @@ static int alloc_seg_map(VP9_COMMON *cm, int seg_map_size) {
cm->prev_seg_map_idx = 1;
cm->current_frame_seg_map = cm->seg_map_array[cm->seg_map_idx];
cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
if (!cm->frame_parallel_decode) {
cm->last_frame_seg_map = cm->seg_map_array[cm->prev_seg_map_idx];
}
return 0;
}
......@@ -123,7 +162,10 @@ static void free_seg_map(VP9_COMMON *cm) {
}
cm->current_frame_seg_map = NULL;
cm->last_frame_seg_map = NULL;
if (!cm->frame_parallel_decode) {
cm->last_frame_seg_map = NULL;
}
}
void vp9_free_frame_buffers(VP9_COMMON *cm) {
......@@ -144,8 +186,7 @@ void vp9_free_frame_buffers(VP9_COMMON *cm) {
}
void vp9_free_context_buffers(VP9_COMMON *cm) {
free_mi(cm);
free_mi(cm, 1);
free_seg_map(cm);
vpx_free(cm->above_context);
......@@ -170,7 +211,7 @@ int vp9_resize_frame_buffers(VP9_COMMON *cm, int width, int height) {
set_mb_mi(cm, aligned_width, aligned_height);
free_mi(cm);
free_mi(cm, 0);
if (alloc_mi(cm, cm->mi_stride * (cm->mi_rows + MI_BLOCK_SIZE)))
goto fail;
......@@ -288,7 +329,6 @@ int vp9_alloc_context_buffers(VP9_COMMON *cm, int width, int height) {
void vp9_remove_common(VP9_COMMON *cm) {
vp9_free_frame_buffers(cm);
vp9_free_context_buffers(cm);
vp9_free_internal_frame_buffers(&cm->buffer_pool->int_frame_buffers);
}
void vp9_update_frame_size(VP9_COMMON *cm) {
......@@ -306,6 +346,20 @@ void vp9_update_frame_size(VP9_COMMON *cm) {
void vp9_swap_mi_and_prev_mi(VP9_COMMON *cm) {
// Swap indices.
const int tmp = cm->mi_idx;
// Only used in frame parallel decode: update the prev_mi buffers if
// needed. The worker that was accessing them must have finished decoding,
// so they can be resized safely now.
if (cm->update_prev_mi) {
const int mi_size = cm->mi_stride * (cm->mi_rows + MI_BLOCK_SIZE);
vpx_free(cm->mip_array[cm->prev_mi_idx]);
vpx_free(cm->mi_grid_base_array[cm->prev_mi_idx]);
cm->mip_array[cm->prev_mi_idx] = NULL;
cm->mi_grid_base_array[cm->prev_mi_idx] = NULL;
alloc_mi_array(cm, mi_size, cm->prev_mi_idx);
cm->update_prev_mi = 0;
}
cm->mi_idx = cm->prev_mi_idx;
cm->prev_mi_idx = tmp;
......
......@@ -439,7 +439,8 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
int i;
vp9_clearall_segfeatures(&cm->seg);
cm->seg.abs_delta = SEGMENT_DELTADATA;
if (cm->last_frame_seg_map)
if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
vpx_memset(cm->last_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
if (cm->current_frame_seg_map)
......@@ -467,7 +468,7 @@ void vp9_setup_past_independence(VP9_COMMON *cm) {
cm->frame_contexts[cm->frame_context_idx] = cm->fc;
}
if (frame_is_intra_only(cm))
if (frame_is_intra_only(cm) && !cm->frame_parallel_decode)
vpx_memset(cm->prev_mip, 0, cm->mi_stride * (cm->mi_rows + 1) *
sizeof(*cm->prev_mip));
......
......@@ -17,14 +17,12 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
int block, int mi_row, int mi_col) {
int block, int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data) {
const int *ref_sign_bias = cm->ref_frame_sign_bias;
int i, refmv_count = 0;
const MODE_INFO *prev_mi = cm->coding_use_prev_mi && cm->prev_mi
? cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col]
: NULL;
const MB_MODE_INFO *const prev_mbmi = prev_mi ? &prev_mi->mbmi : NULL;
MODE_INFO *prev_mi = NULL;
MB_MODE_INFO *prev_mbmi = NULL;
const POSITION *const mv_ref_search = mv_ref_blocks[mi->mbmi.sb_type];
......@@ -71,6 +69,14 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
}
}
// Synchronize here for frame parallel decode if a sync function is provided.
if (sync != NULL) {
sync(data, mi_row);
}
prev_mi = cm->coding_use_prev_mi && cm->prev_mi ?
cm->prev_mi_grid_visible[mi_row * xd->mi_stride + mi_col] : NULL;
prev_mbmi = prev_mi ? &prev_mi->mbmi : NULL;
// Check the last frame's mode and mv info.
if (prev_mbmi) {
if (prev_mbmi->ref_frame[0] == ref_frame)
......@@ -109,12 +115,13 @@ static void find_mv_refs_idx(const VP9_COMMON *cm, const MACROBLOCKD *xd,
}
void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
int mi_row, int mi_col) {
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list,
int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data) {
find_mv_refs_idx(cm, xd, tile, mi, ref_frame, mv_ref_list, -1,
mi_row, mi_col);
mi_row, mi_col, sync, data);
}
static void lower_mv_precision(MV *mv, int allow_hp) {
......@@ -152,7 +159,7 @@ void vp9_append_sub8x8_mvs_for_idx(VP9_COMMON *cm, MACROBLOCKD *xd,
assert(MAX_MV_REF_CANDIDATES == 2);
find_mv_refs_idx(cm, xd, tile, mi, mi->mbmi.ref_frame[ref], mv_list, block,
mi_row, mi_col);
mi_row, mi_col, NULL, NULL);
near->as_int = 0;
switch (block) {
......
......@@ -204,10 +204,12 @@ static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
xd->mb_to_bottom_edge + RIGHT_BOTTOM_MARGIN);
}
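// Synchronization hook for frame parallel decode. When non-NULL, the decoder
// calls sync(data, mi_row) before reading the previous frame's mode info at
// mi_row, so the caller can block until that row has been decoded. Callers
// that need no synchronization pass NULL.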
typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
void vp9_find_mv_refs(const VP9_COMMON *cm, const MACROBLOCKD *xd,
const TileInfo *const tile,
MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
int_mv *mv_ref_list, int mi_row, int mi_col);
int_mv *mv_ref_list, int mi_row, int mi_col,
find_mv_refs_sync sync, void *const data);
// check a list of motion vectors by sad score using a number rows of pixels
// above and a number cols of pixels in the left to select the one with best
......
......@@ -36,10 +36,13 @@ extern "C" {
#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)
// 1 scratch frame for the new frame, 3 for scaled references on the encoder
// 4 scratch frames for the new frames to support a maximum of 4 cores decoding
// in parallel, 3 for scaled references on the encoder.
// TODO(hkuang): Add ondemand frame buffers instead of hardcoding the number
// of framebuffers.
// TODO(jkoleszar): These 3 extra references could probably come from the
// normal reference pool.
#define FRAME_BUFFERS (REF_FRAMES + 4)
#define FRAME_BUFFERS (REF_FRAMES + 7)
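// With REF_FRAMES = 8, that is 8 reference slots + 4 + 3 = 15 buffers in
// the pool.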
#define FRAME_CONTEXTS_LOG2 2
#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)
......@@ -64,6 +67,18 @@ typedef struct {
int ref_count;
vpx_codec_frame_buffer_t raw_frame_buffer;
YV12_BUFFER_CONFIG buf;
// The following variables are only used in frame parallel decode.
// frame_worker_owner indicates which FrameWorker owns this buffer. NULL
// means that no FrameWorker owns, or is decoding, this buffer.
VP9Worker *frame_worker_owner;
// row and col indicate the position (in real pixel units) the frame has
// been decoded to. They are reset to -1 when decoding begins and set to
// INT_MAX when the frame is fully decoded.
int row;
int col;
} RefCntBuffer;
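// Illustrative sketch only, not part of this change: a worker that needs a
// reference frame decoded up to pixel row `row` can wait on the progress
// markers above, roughly:
//
//   vp9_frameworker_lock_stats(ref_buf->frame_worker_owner);
//   while (ref_buf->row < row)
//     /* wait on the owning worker's stats condition variable */;
//   vp9_frameworker_unlock_stats(ref_buf->frame_worker_owner);
//
// The real wait/signal pair is vp9_frameworker_wait() /
// vp9_frameworker_signal_stats(), shown later in this diff.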
typedef struct {
......@@ -114,6 +129,10 @@ typedef struct VP9Common {
int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */
// Prepare ref_frame_map for the next frame.
// Only used in frame parallel decode.
int next_ref_frame_map[REF_FRAMES];
// TODO(jkoleszar): could expand active_ref_idx to 4, with 0 as intra, and
// roll new_fb_idx into it.
......@@ -178,6 +197,9 @@ typedef struct VP9Common {
MODE_INFO **prev_mi_grid_base;
MODE_INFO **prev_mi_grid_visible;
// Used in frame parallel decode to delay resizing prev_mi.
int update_prev_mi;
// Persistent mb segment id map used in prediction.
int seg_map_idx;
int prev_seg_map_idx;
......@@ -197,6 +219,10 @@ typedef struct VP9Common {
struct loopfilter lf;
struct segmentation seg;
// TODO(hkuang): Remove this as it is the same as frame_parallel_decode
// in pbi.
int frame_parallel_decode; // frame-based threading.
// Context probabilities for reference frame prediction
int allow_comp_inter_inter;
MV_REFERENCE_FRAME comp_fixed_ref;
......@@ -235,6 +261,11 @@ typedef struct VP9Common {
ENTROPY_CONTEXT *above_context;
} VP9_COMMON;
// TODO(hkuang): Don't need to lock the whole pool after implementing atomic
// frame reference count.
void lock_buffer_pool(BufferPool *const pool);
void unlock_buffer_pool(BufferPool *const pool);
static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) {
return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
......@@ -242,12 +273,15 @@ static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP9_COMMON *cm) {
static INLINE int get_free_fb(VP9_COMMON *cm) {
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
lock_buffer_pool(cm->buffer_pool);
for (i = 0; i < FRAME_BUFFERS; ++i)
if (frame_bufs[i].ref_count == 0)
break;
assert(i < FRAME_BUFFERS);
frame_bufs[i].ref_count = 1;
unlock_buffer_pool(cm->buffer_pool);
return i;
}
......
This diff is collapsed.
......@@ -25,7 +25,8 @@ void vp9_decode_frame(struct VP9Decoder *pbi,
const uint8_t *data, const uint8_t *data_end,
const uint8_t **p_data_end);
void vp9_dec_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
void vp9_dec_build_inter_predictors_sb(struct VP9Decoder *const pbi,
MACROBLOCKD *xd, int mi_row, int mi_col,
BLOCK_SIZE bsize);
#ifdef __cplusplus
} // extern "C"
......
......@@ -420,11 +420,18 @@ static int read_is_inter_block(VP9_COMMON *const cm, MACROBLOCKD *const xd,
}
}
static void read_inter_block_mode_info(VP9_COMMON *const cm,
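// Sync callback for frame parallel decode: blocks until the worker decoding
// the previous frame has progressed past the pixel row corresponding to
// mi_row, so its mode info and motion vectors at that row are safe to read.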
static void fpm_sync(void *const data, int mi_row) {
VP9Decoder *const pbi = (VP9Decoder *)data;
vp9_frameworker_wait(pbi->frame_worker_owner, pbi->prev_buf,
mi_row << MI_BLOCK_SIZE_LOG2);
}
static void read_inter_block_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
const TileInfo *const tile,
MODE_INFO *const mi,
int mi_row, int mi_col, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MB_MODE_INFO *const mbmi = &mi->mbmi;
const BLOCK_SIZE bsize = mbmi->sb_type;
const int allow_hp = cm->allow_high_precision_mv;
......@@ -438,7 +445,7 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm,
for (ref = 0; ref < 1 + is_compound; ++ref) {
const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
vp9_find_mv_refs(cm, xd, tile, mi, frame, mbmi->ref_mvs[frame],
mi_row, mi_col);
mi_row, mi_col, fpm_sync, (void *)pbi);
}
inter_mode_ctx = mbmi->mode_context[mbmi->ref_frame[0]];
......@@ -512,10 +519,13 @@ static void read_inter_block_mode_info(VP9_COMMON *const cm,
}
}
static void read_inter_frame_mode_info(VP9_COMMON *const cm,
// TODO(hkuang): Pass cm instead of pbi. This requires change in
// vp9_frameworker_wait.
static void read_inter_frame_mode_info(VP9Decoder *const pbi,
MACROBLOCKD *const xd,
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
MODE_INFO *const mi = xd->mi[0];
MB_MODE_INFO *const mbmi = &mi->mbmi;
int inter_block;
......@@ -529,16 +539,17 @@ static void read_inter_frame_mode_info(VP9_COMMON *const cm,
!mbmi->skip || !inter_block, r);
if (inter_block)
read_inter_block_mode_info(cm, xd, tile, mi, mi_row, mi_col, r);
read_inter_block_mode_info(pbi, xd, tile, mi, mi_row, mi_col, r);
else
read_intra_block_mode_info(cm, mi, r);
}
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
const TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r) {
VP9_COMMON *const cm = &pbi->common;
if (frame_is_intra_only(cm))
read_intra_frame_mode_info(cm, xd, mi_row, mi_col, r);
else
read_inter_frame_mode_info(cm, xd, tile, mi_row, mi_col, r);
read_inter_frame_mode_info(pbi, xd, tile, mi_row, mi_col, r);
}
......@@ -11,6 +11,7 @@
#ifndef VP9_DECODER_VP9_DECODEMV_H_
#define VP9_DECODER_VP9_DECODEMV_H_
#include "vp9/decoder/vp9_decoder.h"
#include "vp9/decoder/vp9_reader.h"
#ifdef __cplusplus
......@@ -19,7 +20,7 @@ extern "C" {
struct TileInfo;
void vp9_read_mode_info(VP9_COMMON *cm, MACROBLOCKD *xd,
void vp9_read_mode_info(VP9Decoder *const pbi, MACROBLOCKD *xd,
const struct TileInfo *const tile,
int mi_row, int mi_col, vp9_reader *r);
......
......@@ -26,6 +26,7 @@
#endif
#include "vp9/common/vp9_quant_common.h"
#include "vp9/common/vp9_systemdependent.h"
#include "vp9/common/vp9_thread.h"
#include "vp9/decoder/vp9_decodeframe.h"
#include "vp9/decoder/vp9_decoder.h"
......@@ -63,6 +64,7 @@ VP9Decoder *vp9_decoder_create(BufferPool *const pool) {
// Initialize the references to not point to any frame buffers.
vpx_memset(&cm->ref_frame_map, -1, sizeof(cm->ref_frame_map));
vpx_memset(&cm->next_ref_frame_map, -1, sizeof(cm->next_ref_frame_map));
cm->current_video_frame = 0;
pbi->ready_for_new_data = 1;
......@@ -195,29 +197,51 @@ int vp9_get_reference_dec(VP9Decoder *pbi, int index, YV12_BUFFER_CONFIG **fb) {
return 0;
}
static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
BufferPool *const pool) {
if (idx >= 0) {
--frame_bufs[idx].ref_count;
if (frame_bufs[idx].ref_count == 0) {
pool->release_fb_cb(pool->cb_priv, &frame_bufs[idx].raw_frame_buffer);
}
}
}
/* If any buffer updating is signaled it should be done here. */
static void swap_frame_buffers(VP9Decoder *pbi) {
int ref_index = 0, mask;
VP9_COMMON * const cm = &pbi->common;
BufferPool * const pool = cm->buffer_pool;
VP9_COMMON *const cm = &pbi->common;
BufferPool *const pool = cm->buffer_pool;
RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
lock_buffer_pool(pool);
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
if (mask & 1) {
const int old_idx = cm->ref_frame_map[ref_index];
ref_cnt_fb(frame_bufs, &cm->ref_frame_map[ref_index],
cm->new_fb_idx);
if (old_idx >= 0 && frame_bufs[old_idx].ref_count == 0)
pool->release_fb_cb(pool->cb_priv,
&frame_bufs[old_idx].raw_frame_buffer);
const int old_idx = cm->ref_frame_map[ref_index];
// The current thread releases its hold on the reference frame.
decrease_ref_count(old_idx, frame_bufs, pool);
// Release the reference frame in reference map.
if ((mask & 1) && old_idx >= 0) {
decrease_ref_count(old_idx, frame_bufs, pool);
}
cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
++ref_index;
}
// The current thread releases its hold on the remaining reference frames.
for (; ref_index < REF_FRAMES && !cm->show_existing_frame; ++ref_index) {
const int old_idx = cm->ref_frame_map[ref_index];
decrease_ref_count(old_idx, frame_bufs, pool);
cm->ref_frame_map[ref_index] = cm->next_ref_frame_map[ref_index];
}
unlock_buffer_pool(pool);
cm->frame_to_show = get_frame_new_buffer(cm);
if (!pbi->frame_parallel_decode || !cm->show_frame) {
lock_buffer_pool(pool);
--frame_bufs[cm->new_fb_idx].ref_count;
unlock_buffer_pool(pool);
}
// Invalidate these references until the next frame starts.
......@@ -256,6 +280,20 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
&frame_bufs[cm->new_fb_idx].raw_frame_buffer);
cm->new_fb_idx = get_free_fb(cm);
if (pbi->frame_parallel_decode) {
VP9Worker *const worker = pbi->frame_worker_owner;
vp9_frameworker_lock_stats(worker);
frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
// Reset decoding progress.
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
pbi->cur_buf->row = -1;
pbi->cur_buf->col = -1;
vp9_frameworker_unlock_stats(worker);
} else {
pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
}
if (setjmp(cm->error.jmp)) {
cm->error.setjmp = 0;
......@@ -283,19 +321,38 @@ int vp9_receive_compressed_data(VP9Decoder *pbi,
vp9_clear_system_state();
cm->last_width = cm->width;
cm->last_height = cm->height;
if (!cm->show_existing_frame)
cm->last_show_frame = cm->show_frame;
if (cm->show_frame) {
if (!cm->show_existing_frame)
vp9_swap_mi_and_prev_mi(cm);
cm->current_video_frame++;
}
// Update progress in frame parallel decode.
if (pbi->frame_parallel_decode) {
// Need to lock the mutex here as another thread may
// be accessing this buffer.
VP9Worker *const worker = pbi->frame_worker_owner;
FrameWorkerData *const frame_worker_data = worker->data1;
vp9_frameworker_lock_stats(worker);
if (cm->show_frame) {
if (!cm->show_existing_frame)
vp9_swap_mi_and_prev_mi(cm);
cm->current_video_frame++;
}
vp9_swap_current_and_last_seg_map(cm);
frame_worker_data->frame_decoded = 1;
frame_worker_data->frame_context_ready = 1;
vp9_frameworker_signal_stats(worker);
vp9_frameworker_unlock_stats(worker);
} else {
cm->last_width = cm->width;
cm->last_height = cm->height;
if (cm->show_frame) {
if (!cm->show_existing_frame)
vp9_swap_mi_and_prev_mi(cm);
cm->current_video_frame++;
}
vp9_swap_current_and_last_seg_map(cm);
vp9_swap_current_and_last_seg_map(cm);
}
pbi->ready_for_new_data = 0;
......
......@@ -45,6 +45,12 @@ typedef struct VP9Decoder {
int frame_parallel_decode; // frame-based threading.
// TODO(hkuang): Combine this with cur_buf in macroblockd as they are
// the same.
RefCntBuffer *cur_buf; // Current decoding frame buffer.
RefCntBuffer *prev_buf; // Previous decoding frame buffer.
VP9Worker *frame_worker_owner; // frame_worker that owns this pbi.
VP9Worker lf_worker;
VP9Worker *tile_workers;
int num_tile_workers;
......
......@@ -17,6 +17,8 @@
#include "vp9/decoder/vp9_dthread.h"
#include "vp9/decoder/vp9_decoder.h"
// #define DEBUG_THREAD
#if CONFIG_MULTITHREAD
static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
const int kMaxTryLocks = 4000;
......@@ -279,3 +281,166 @@ void vp9_loop_filter_dealloc(VP9LfSync *lf_sync, int rows) {
vp9_zero(*lf_sync);
}
}
// TODO(hkuang): Clean up all the #ifdef in this file.
void vp9_frameworker_lock_stats(VP9Worker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_lock(&worker_data->stats_mutex);
#else
(void)worker;
#endif
}
void vp9_frameworker_unlock_stats(VP9Worker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
pthread_mutex_unlock(&worker_data->stats_mutex);
#else
(void)worker;
#endif
}
void vp9_frameworker_signal_stats(VP9Worker *const worker) {
#if CONFIG_MULTITHREAD
FrameWorkerData *const worker_data = worker->data1;
// TODO(hkuang): Investigate using broadcast or signal.
pthread_cond_signal(&worker_data->stats_cond);
#else
(void)worker;
#endif
}
// TODO(hkuang): Remove worker parameter as it is only used in debug code.
void vp9_frameworker_wait(VP9Worker *const worker, RefCntBuffer *const ref_buf,
int row) {
#if CONFIG_MULTITHREAD
if (!ref_buf)
return;
// Enabling the following line of code will trigger a harmless TSan
// (ThreadSanitizer) warning but gives the best performance.