Commit 109b69a7 authored by John Koleszar

RTCD: add arnr functions

This commit continues the process of converting to the new RTCD
system. It removes the last of the VP8_ENCODER_RTCD struct references.

Change-Id: I2a44f52d7cccf5177e1ca98a028ead570d045395
parent 0b0bc8d0
@@ -287,10 +287,6 @@ typedef struct macroblockd
     */
    DECLARE_ALIGNED(32, unsigned char, y_buf[22*32]);
#endif
-#if CONFIG_RUNTIME_CPU_DETECT
-    struct VP8_COMMON_RTCD *rtcd;
-#endif
} MACROBLOCKD;
@@ -481,6 +481,13 @@ vp8_refining_search_sad_sse3=vp8_refining_search_sadx4
 prototype int vp8_diamond_search_sad "struct macroblock *x, struct block *b, struct blockd *d, union int_mv *ref_mv, union int_mv *best_mv, int search_param, int sad_per_bit, int *num00, struct variance_vtable *fn_ptr, int *mvcost[2], union int_mv *center_mv"
 vp8_diamond_search_sad_sse3=vp8_diamond_search_sadx4
+#
+# Alt-ref Noise Reduction (ARNR)
+#
+if [ "$CONFIG_REALTIME_ONLY" != "yes" ]; then
+    prototype void vp8_temporal_filter_apply "unsigned char *frame1, unsigned int stride, unsigned char *frame2, unsigned int block_size, int strength, int filter_weight, unsigned int *accumulator, unsigned short *count"
+    specialize vp8_temporal_filter_apply sse2
+fi
 # End of encoder only functions
 fi
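
For context, the prototype/specialize lines added above are read by the RTCD generator, which emits per-architecture declarations plus a single dispatch point that callers invoke by the plain function name. The sketch below illustrates the idea only; it is not the verbatim generated header, and cpu_has_sse2() stands in for whatever CPU-feature query the build actually uses.

/* Illustrative sketch only -- not the generated RTCD header.
 * cpu_has_sse2() is a stand-in for the real CPU-feature check. */
void vp8_temporal_filter_apply_c(unsigned char *frame1, unsigned int stride,
                                 unsigned char *frame2, unsigned int block_size,
                                 int strength, int filter_weight,
                                 unsigned int *accumulator, unsigned short *count);
void vp8_temporal_filter_apply_sse2(unsigned char *frame1, unsigned int stride,
                                    unsigned char *frame2, unsigned int block_size,
                                    int strength, int filter_weight,
                                    unsigned int *accumulator, unsigned short *count);

#if CONFIG_RUNTIME_CPU_DETECT
/* With runtime detection, callers go through a pointer chosen once at init. */
extern void (*vp8_temporal_filter_apply)(unsigned char *frame1, unsigned int stride,
                                         unsigned char *frame2, unsigned int block_size,
                                         int strength, int filter_weight,
                                         unsigned int *accumulator, unsigned short *count);
static void setup_temporal_filter_dispatch(void)
{
    vp8_temporal_filter_apply = cpu_has_sse2() ? vp8_temporal_filter_apply_sse2
                                               : vp8_temporal_filter_apply_c;
}
#else
/* Static builds bind the name directly to the best compiled-in version. */
#define vp8_temporal_filter_apply vp8_temporal_filter_apply_sse2
#endif
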
@@ -93,11 +93,6 @@ void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd)
     }
 }
-#if CONFIG_RUNTIME_CPU_DETECT
-#define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x)
-#else
-#define RTCD_VTABLE(x) NULL
-#endif
 static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
                               unsigned int mb_idx)
@@ -30,12 +30,6 @@
 extern void mb_init_dequantizer(VP8D_COMP *pbi, MACROBLOCKD *xd);
-#if CONFIG_RUNTIME_CPU_DETECT
-#define RTCD_VTABLE(x) (&(pbi)->common.rtcd.x)
-#else
-#define RTCD_VTABLE(x) NULL
-#endif
 static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_DEC *mbrd, int count)
 {
     VP8_COMMON *const pc = & pbi->common;
@@ -44,9 +38,6 @@ static void setup_decoding_thread_data(VP8D_COMP *pbi, MACROBLOCKD *xd, MB_ROW_D
     for (i = 0; i < count; i++)
     {
         MACROBLOCKD *mbd = &mbrd[i].mbd;
-#if CONFIG_RUNTIME_CPU_DETECT
-        mbd->rtcd = xd->rtcd;
-#endif
         mbd->subpixel_predict = xd->subpixel_predict;
         mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
         mbd->subpixel_predict8x8 = xd->subpixel_predict8x8;
@@ -29,13 +29,6 @@
 #include "vp8/common/invtrans.h"
 #include "vpx_ports/vpx_timer.h"
-#if CONFIG_RUNTIME_CPU_DETECT
-#define RTCD(x) &cpi->common.rtcd.x
-#define IF_RTCD(x) (x)
-#else
-#define RTCD(x) NULL
-#define IF_RTCD(x) NULL
-#endif
 extern void vp8_stuff_mb(VP8_COMP *cpi, MACROBLOCKD *x, TOKENEXTRA **t) ;
 extern void vp8_calc_ref_frame_costs(int *ref_frame_cost,
                                      int prob_intra,
@@ -1097,11 +1090,11 @@ int vp8cx_encode_intra_macro_block(VP8_COMP *cpi, MACROBLOCK *x, TOKENEXTRA **t,
     }
     if (x->e_mbd.mode_info_context->mbmi.mode == B_PRED)
-        vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
+        vp8_encode_intra4x4mby(x);
     else
-        vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
-    vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
+        vp8_encode_intra16x16mby(x);
+    vp8_encode_intra16x16mbuv(x);
     sum_intra_stats(cpi, x);
     vp8_tokenize_mb(cpi, &x->e_mbd, t);
@@ -1239,15 +1232,15 @@ int vp8cx_encode_inter_macroblock
     if (xd->mode_info_context->mbmi.ref_frame == INTRA_FRAME)
     {
-        vp8_encode_intra16x16mbuv(IF_RTCD(&cpi->rtcd), x);
+        vp8_encode_intra16x16mbuv(x);
         if (xd->mode_info_context->mbmi.mode == B_PRED)
         {
-            vp8_encode_intra4x4mby(IF_RTCD(&cpi->rtcd), x);
+            vp8_encode_intra4x4mby(x);
         }
         else
         {
-            vp8_encode_intra16x16mby(IF_RTCD(&cpi->rtcd), x);
+            vp8_encode_intra16x16mby(x);
         }
         sum_intra_stats(cpi, x);
@@ -1269,7 +1262,7 @@ int vp8cx_encode_inter_macroblock
     if (!x->skip)
     {
-        vp8_encode_inter16x16(IF_RTCD(&cpi->rtcd), x);
+        vp8_encode_inter16x16(x);
         // Clear mb_skip_coeff if mb_no_coeff_skip is not set
         if (!cpi->common.mb_no_coeff_skip)
@@ -18,12 +18,6 @@
 #include "encodeintra.h"
-#if CONFIG_RUNTIME_CPU_DETECT
-#define IF_RTCD(x) (x)
-#else
-#define IF_RTCD(x) NULL
-#endif
 int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
 {
@@ -33,13 +27,11 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
     if (use_dc_pred)
     {
-        const VP8_ENCODER_RTCD *rtcd = IF_RTCD(&cpi->rtcd);
         x->e_mbd.mode_info_context->mbmi.mode = DC_PRED;
         x->e_mbd.mode_info_context->mbmi.uv_mode = DC_PRED;
         x->e_mbd.mode_info_context->mbmi.ref_frame = INTRA_FRAME;
-        vp8_encode_intra16x16mby(rtcd, x);
+        vp8_encode_intra16x16mby(x);
         vp8_inverse_transform_mby(&x->e_mbd);
     }
@@ -48,7 +40,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
         for (i = 0; i < 16; i++)
         {
             x->e_mbd.block[i].bmi.as_mode = B_DC_PRED;
-            vp8_encode_intra4x4block(IF_RTCD(&cpi->rtcd), x, i);
+            vp8_encode_intra4x4block(x, i);
         }
     }
@@ -57,8 +49,7 @@ int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred)
     return intra_pred_var;
 }
-void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
-                              MACROBLOCK *x, int ib)
+void vp8_encode_intra4x4block(MACROBLOCK *x, int ib)
 {
     BLOCKD *b = &x->e_mbd.block[ib];
     BLOCK *be = &x->block[ib];
@@ -86,7 +77,7 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
     }
 }
-void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
+void vp8_encode_intra4x4mby(MACROBLOCK *mb)
 {
     int i;
@@ -94,11 +85,11 @@ void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *mb)
     vp8_intra_prediction_down_copy(x);
     for (i = 0; i < 16; i++)
-        vp8_encode_intra4x4block(rtcd, mb, i);
+        vp8_encode_intra4x4block(mb, i);
     return;
 }
-void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
+void vp8_encode_intra16x16mby(MACROBLOCK *x)
 {
     BLOCK *b = &x->block[0];
     MACROBLOCKD *xd = &x->e_mbd;
@@ -113,10 +104,10 @@ void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
     vp8_quantize_mby(x);
     if (x->optimize)
-        vp8_optimize_mby(x, rtcd);
+        vp8_optimize_mby(x);
 }
-void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
+void vp8_encode_intra16x16mbuv(MACROBLOCK *x)
 {
     MACROBLOCKD *xd = &x->e_mbd;
@@ -131,5 +122,5 @@ void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
     vp8_quantize_mbuv(x);
     if (x->optimize)
-        vp8_optimize_mbuv(x, rtcd);
+        vp8_optimize_mbuv(x);
 }
@@ -14,9 +14,8 @@
 #include "onyx_int.h"
 int vp8_encode_intra(VP8_COMP *cpi, MACROBLOCK *x, int use_dc_pred);
-void vp8_encode_intra16x16mby(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
-void vp8_encode_intra16x16mbuv(const VP8_ENCODER_RTCD *, MACROBLOCK *x);
-void vp8_encode_intra4x4mby(const VP8_ENCODER_RTCD *, MACROBLOCK *mb);
-void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
-                              MACROBLOCK *x, int ib);
+void vp8_encode_intra16x16mby(MACROBLOCK *x);
+void vp8_encode_intra16x16mbuv(MACROBLOCK *x);
+void vp8_encode_intra4x4mby(MACROBLOCK *mb);
+void vp8_encode_intra4x4block(MACROBLOCK *x, int ib);
 #endif
@@ -19,11 +19,6 @@
 #include "vpx_mem/vpx_mem.h"
 #include "rdopt.h"
-#if CONFIG_RUNTIME_CPU_DETECT
-#define IF_RTCD(x) (x)
-#else
-#define IF_RTCD(x) NULL
-#endif
 void vp8_subtract_b_c(BLOCK *be, BLOCKD *bd, int pitch)
 {
     unsigned char *src_ptr = (*(be->base_src) + be->src);
@@ -98,7 +93,7 @@ void vp8_subtract_mby_c(short *diff, unsigned char *src, int src_stride,
     }
 }
-static void vp8_subtract_mb(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
+static void vp8_subtract_mb(MACROBLOCK *x)
 {
     BLOCK *b = &x->block[0];
@@ -227,8 +222,7 @@ static const int plane_rd_mult[4]=
 };
 static void optimize_b(MACROBLOCK *mb, int ib, int type,
-                       ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l,
-                       const VP8_ENCODER_RTCD *rtcd)
+                       ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
 {
     BLOCK *b;
     BLOCKD *d;
@@ -509,7 +503,7 @@ static void check_reset_2nd_coeffs(MACROBLOCKD *x, int type,
     }
 }
-static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
+static void optimize_mb(MACROBLOCK *x)
 {
     int b;
     int type;
@@ -532,27 +526,27 @@ static void optimize_mb(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
     for (b = 0; b < 16; b++)
     {
         optimize_b(x, b, type,
-                   ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+                   ta + vp8_block2above[b], tl + vp8_block2left[b]);
     }
     for (b = 16; b < 24; b++)
     {
         optimize_b(x, b, PLANE_TYPE_UV,
-                   ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+                   ta + vp8_block2above[b], tl + vp8_block2left[b]);
     }
     if (has_2nd_order)
     {
         b=24;
         optimize_b(x, b, PLANE_TYPE_Y2,
-                   ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+                   ta + vp8_block2above[b], tl + vp8_block2left[b]);
         check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
                                ta + vp8_block2above[b], tl + vp8_block2left[b]);
     }
 }
-void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
+void vp8_optimize_mby(MACROBLOCK *x)
 {
     int b;
     int type;
@@ -581,7 +575,7 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
     for (b = 0; b < 16; b++)
     {
         optimize_b(x, b, type,
-                   ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+                   ta + vp8_block2above[b], tl + vp8_block2left[b]);
     }
@@ -589,13 +583,13 @@ void vp8_optimize_mby(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
     {
         b=24;
         optimize_b(x, b, PLANE_TYPE_Y2,
-                   ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+                   ta + vp8_block2above[b], tl + vp8_block2left[b]);
         check_reset_2nd_coeffs(&x->e_mbd, PLANE_TYPE_Y2,
                                ta + vp8_block2above[b], tl + vp8_block2left[b]);
     }
 }
-void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
+void vp8_optimize_mbuv(MACROBLOCK *x)
 {
     int b;
     ENTROPY_CONTEXT_PLANES t_above, t_left;
@@ -617,26 +611,26 @@ void vp8_optimize_mbuv(MACROBLOCK *x, const VP8_ENCODER_RTCD *rtcd)
     for (b = 16; b < 24; b++)
    {
         optimize_b(x, b, PLANE_TYPE_UV,
-                   ta + vp8_block2above[b], tl + vp8_block2left[b], rtcd);
+                   ta + vp8_block2above[b], tl + vp8_block2left[b]);
     }
 }
-void vp8_encode_inter16x16(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
+void vp8_encode_inter16x16(MACROBLOCK *x)
 {
     vp8_build_inter_predictors_mb(&x->e_mbd);
-    vp8_subtract_mb(rtcd, x);
+    vp8_subtract_mb(x);
     transform_mb(x);
     vp8_quantize_mb(x);
     if (x->optimize)
-        optimize_mb(x, rtcd);
+        optimize_mb(x);
 }
 /* this funciton is used by first pass only */
-void vp8_encode_inter16x16y(const VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x)
+void vp8_encode_inter16x16y(MACROBLOCK *x)
 {
     BLOCK *b = &x->block[0];
@@ -13,15 +13,14 @@
 #define __INC_ENCODEMB_H
 #include "onyx_int.h"
-struct VP8_ENCODER_RTCD;
-void vp8_encode_inter16x16(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
+void vp8_encode_inter16x16(MACROBLOCK *x);
 void vp8_build_dcblock(MACROBLOCK *b);
 void vp8_transform_mb(MACROBLOCK *mb);
 void vp8_transform_mbuv(MACROBLOCK *x);
 void vp8_transform_intra_mby(MACROBLOCK *x);
-void vp8_optimize_mby(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-void vp8_optimize_mbuv(MACROBLOCK *x, const struct VP8_ENCODER_RTCD *rtcd);
-void vp8_encode_inter16x16y(const struct VP8_ENCODER_RTCD *rtcd, MACROBLOCK *x);
+void vp8_optimize_mby(MACROBLOCK *x);
+void vp8_optimize_mbuv(MACROBLOCK *x);
+void vp8_encode_inter16x16y(MACROBLOCK *x);
 #endif
@@ -425,9 +425,6 @@ void vp8cx_init_mbrthread_data(VP8_COMP *cpi,
         mbd->subpixel_predict8x4 = xd->subpixel_predict8x4;
         mbd->subpixel_predict8x8 = xd->subpixel_predict8x8;
         mbd->subpixel_predict16x16 = xd->subpixel_predict16x16;
-#if CONFIG_RUNTIME_CPU_DETECT
-        mbd->rtcd = xd->rtcd;
-#endif
         mb->gf_active_ptr = x->gf_active_ptr;
         vpx_memset(mbr_ei[i].segment_counts, 0, sizeof(mbr_ei[i].segment_counts));
@@ -31,12 +31,6 @@
 //#define OUTPUT_FPF 1
-#if CONFIG_RUNTIME_CPU_DETECT
-#define IF_RTCD(x) (x)
-#else
-#define IF_RTCD(x) NULL
-#endif
 extern void vp8_build_block_offsets(MACROBLOCK *x);
 extern void vp8_setup_block_ptrs(MACROBLOCK *x);
 extern void vp8cx_frame_init_quantizer(VP8_COMP *cpi);
@@ -674,7 +668,7 @@ void vp8_first_pass(VP8_COMP *cpi)
             d->bmi.mv.as_mv.col <<= 3;
             this_error = motion_error;
             vp8_set_mbmode_and_mvs(x, NEWMV, &d->bmi.mv);
-            vp8_encode_inter16x16y(IF_RTCD(&cpi->rtcd), x);
+            vp8_encode_inter16x16y(x);
             sum_mvr += d->bmi.mv.as_mv.row;
             sum_mvr_abs += abs(d->bmi.mv.as_mv.row);
             sum_mvc += d->bmi.mv.as_mv.col;
@@ -24,12 +24,6 @@ extern void vp8_yv12_copy_partial_frame(YV12_BUFFER_CONFIG *src_ybc,
 void vp8_cmachine_specific_config(VP8_COMP *cpi)
 {
-#if CONFIG_RUNTIME_CPU_DETECT
-#if !(CONFIG_REALTIME_ONLY)
-    cpi->rtcd.temporal.apply = vp8_temporal_filter_apply_c;
-#endif
-#endif
     // Pure C:
     vp8_yv12_copy_partial_frame_ptr = vp8_yv12_copy_partial_frame;
@@ -31,7 +31,6 @@
 #include "vp8/common/swapyv12buffer.h"
 #include "vp8/common/threading.h"
 #include "vpx_ports/vpx_timer.h"
-#include "temporal_filter.h"