Commit 5a2fd63a authored by Tero Rintaluoma's avatar Tero Rintaluoma
Browse files

ARMv6 optimized Intra4x4 prediction

Added ARM optimized intra 4x4 prediction
 - 2x faster (per profiler measurements) than the C code compiled with -O3
 - Function interface changed a little to improve BLOCKD structure
   access

Change-Id: I9bc2b723155943fe0cf03dd9ca5f1760f7a81f54
parent f89ea343
......@@ -63,6 +63,7 @@ void vp8_arch_arm_common_init(VP8_COMMON *ctx)
rtcd->recon.copy16x16 = vp8_copy_mem16x16_v6;
rtcd->recon.copy8x8 = vp8_copy_mem8x8_v6;
rtcd->recon.copy8x4 = vp8_copy_mem8x4_v6;
rtcd->recon.intra4x4_predict = vp8_intra4x4_predict_armv6;
}
#endif
......
This diff is collapsed.
......@@ -17,6 +17,7 @@
extern prototype_copy_block(vp8_copy_mem8x8_v6);
extern prototype_copy_block(vp8_copy_mem8x4_v6);
extern prototype_copy_block(vp8_copy_mem16x16_v6);
extern prototype_intra4x4_predict(vp8_intra4x4_predict_armv6);
#if !CONFIG_RUNTIME_CPU_DETECT
#undef vp8_recon_copy8x8
......@@ -27,6 +28,9 @@ extern prototype_copy_block(vp8_copy_mem16x16_v6);
#undef vp8_recon_copy16x16
#define vp8_recon_copy16x16 vp8_copy_mem16x16_v6
#undef vp8_recon_intra4x4_predict
#define vp8_recon_intra4x4_predict vp8_intra4x4_predict_armv6
#endif
#endif
......
......@@ -13,6 +13,7 @@
#include "vpx/vpx_codec.h"
#include "vpx_ports/asm_offsets.h"
#include "vpx_scale/yv12config.h"
#include "vp8/common/blockd.h"
BEGIN
......@@ -34,6 +35,20 @@ END
/* add asserts for any offset that is not supported by assembly code */
/* add asserts for any size that is not supported by assembly code */
#if HAVE_ARMV6
/* switch case in vp8_intra4x4_predict_armv6 is based on these enumerated values */
ct_assert(B_DC_PRED, B_DC_PRED == 0);
ct_assert(B_TM_PRED, B_TM_PRED == 1);
ct_assert(B_VE_PRED, B_VE_PRED == 2);
ct_assert(B_HE_PRED, B_HE_PRED == 3);
ct_assert(B_LD_PRED, B_LD_PRED == 4);
ct_assert(B_RD_PRED, B_RD_PRED == 5);
ct_assert(B_VR_PRED, B_VR_PRED == 6);
ct_assert(B_VL_PRED, B_VL_PRED == 7);
ct_assert(B_HD_PRED, B_HD_PRED == 8);
ct_assert(B_HU_PRED, B_HU_PRED == 9);
#endif
#if HAVE_ARMV7
/* vp8_yv12_extend_frame_borders_neon makes several assumptions based on this */
ct_assert(VP8BORDERINPIXELS_VAL, VP8BORDERINPIXELS == 32)
......
......@@ -88,7 +88,7 @@ void vp8_machine_specific_config(VP8_COMMON *ctx)
rtcd->recon.build_intra_predictors_mbuv_s =
vp8_build_intra_predictors_mbuv_s;
rtcd->recon.intra4x4_predict =
vp8_intra4x4_predict;
vp8_intra4x4_predict_c;
rtcd->subpix.sixtap16x16 = vp8_sixtap_predict16x16_c;
rtcd->subpix.sixtap8x8 = vp8_sixtap_predict8x8_c;
......
......@@ -27,7 +27,8 @@
void sym(MACROBLOCKD *x)
#define prototype_intra4x4_predict(sym) \
void sym(BLOCKD *x, int b_mode, unsigned char *predictor, int stride)
void sym(unsigned char *src, int src_stride, int b_mode, \
unsigned char *dst, int dst_stride)
struct vp8_recon_rtcd_vtable;
......@@ -79,7 +80,7 @@ extern prototype_build_intra_predictors\
(vp8_recon_build_intra_predictors_mbuv_s);
#ifndef vp8_recon_intra4x4_predict
#define vp8_recon_intra4x4_predict vp8_intra4x4_predict
#define vp8_recon_intra4x4_predict vp8_intra4x4_predict_c
#endif
extern prototype_intra4x4_predict\
(vp8_recon_intra4x4_predict);
......
......@@ -9,25 +9,22 @@
*/
#include "vpx_config.h"
#include "recon.h"
#include "vpx_mem/vpx_mem.h"
#include "reconintra.h"
void vp8_intra4x4_predict(BLOCKD *x,
int b_mode,
unsigned char *predictor, int stride)
void vp8_intra4x4_predict_c(unsigned char *src, int src_stride,
int b_mode,
unsigned char *dst, int dst_stride)
{
int i, r, c;
unsigned char *Above = *(x->base_dst) + x->dst - x->dst_stride;
unsigned char *Above = src - src_stride;
unsigned char Left[4];
unsigned char top_left = Above[-1];
Left[0] = (*(x->base_dst))[x->dst - 1];
Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
Left[0] = src[-1];
Left[1] = src[-1 + src_stride];
Left[2] = src[-1 + 2 * src_stride];
Left[3] = src[-1 + 3 * src_stride];
switch (b_mode)
{
......@@ -47,10 +44,10 @@ void vp8_intra4x4_predict(BLOCKD *x,
{
for (c = 0; c < 4; c++)
{
predictor[c] = expected_dc;
dst[c] = expected_dc;
}
predictor += stride;
dst += dst_stride;
}
}
break;
......@@ -69,10 +66,10 @@ void vp8_intra4x4_predict(BLOCKD *x,
if (pred > 255)
pred = 255;
predictor[c] = pred;
dst[c] = pred;
}
predictor += stride;
dst += dst_stride;
}
}
break;
......@@ -91,10 +88,10 @@ void vp8_intra4x4_predict(BLOCKD *x,
for (c = 0; c < 4; c++)
{
predictor[c] = ap[c];
dst[c] = ap[c];
}
predictor += stride;
dst += dst_stride;
}
}
......@@ -114,32 +111,32 @@ void vp8_intra4x4_predict(BLOCKD *x,
{
for (c = 0; c < 4; c++)
{
predictor[c] = lp[r];
dst[c] = lp[r];
}
predictor += stride;
dst += dst_stride;
}
}
break;
case B_LD_PRED:
{
unsigned char *ptr = Above;
predictor[0 * stride + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
predictor[0 * stride + 1] =
predictor[1 * stride + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
predictor[0 * stride + 2] =
predictor[1 * stride + 1] =
predictor[2 * stride + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
predictor[0 * stride + 3] =
predictor[1 * stride + 2] =
predictor[2 * stride + 1] =
predictor[3 * stride + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
predictor[1 * stride + 3] =
predictor[2 * stride + 2] =
predictor[3 * stride + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
predictor[2 * stride + 3] =
predictor[3 * stride + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
predictor[3 * stride + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
dst[0 * dst_stride + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
dst[0 * dst_stride + 1] =
dst[1 * dst_stride + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
dst[0 * dst_stride + 2] =
dst[1 * dst_stride + 1] =
dst[2 * dst_stride + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
dst[0 * dst_stride + 3] =
dst[1 * dst_stride + 2] =
dst[2 * dst_stride + 1] =
dst[3 * dst_stride + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
dst[1 * dst_stride + 3] =
dst[2 * dst_stride + 2] =
dst[3 * dst_stride + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
dst[2 * dst_stride + 3] =
dst[3 * dst_stride + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
dst[3 * dst_stride + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
}
break;
......@@ -158,22 +155,22 @@ void vp8_intra4x4_predict(BLOCKD *x,
pp[7] = Above[2];
pp[8] = Above[3];
predictor[3 * stride + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[3 * stride + 1] =
predictor[2 * stride + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[3 * stride + 2] =
predictor[2 * stride + 1] =
predictor[1 * stride + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[3 * stride + 3] =
predictor[2 * stride + 2] =
predictor[1 * stride + 1] =
predictor[0 * stride + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * stride + 3] =
predictor[1 * stride + 2] =
predictor[0 * stride + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[1 * stride + 3] =
predictor[0 * stride + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[0 * stride + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
dst[3 * dst_stride + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
dst[3 * dst_stride + 1] =
dst[2 * dst_stride + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
dst[3 * dst_stride + 2] =
dst[2 * dst_stride + 1] =
dst[1 * dst_stride + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
dst[3 * dst_stride + 3] =
dst[2 * dst_stride + 2] =
dst[1 * dst_stride + 1] =
dst[0 * dst_stride + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
dst[2 * dst_stride + 3] =
dst[1 * dst_stride + 2] =
dst[0 * dst_stride + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
dst[1 * dst_stride + 3] =
dst[0 * dst_stride + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
dst[0 * dst_stride + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
}
break;
......@@ -193,22 +190,22 @@ void vp8_intra4x4_predict(BLOCKD *x,
pp[8] = Above[3];
predictor[3 * stride + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * stride + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[3 * stride + 1] =
predictor[1 * stride + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * stride + 1] =
predictor[0 * stride + 0] = (pp[4] + pp[5] + 1) >> 1;
predictor[3 * stride + 2] =
predictor[1 * stride + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[2 * stride + 2] =
predictor[0 * stride + 1] = (pp[5] + pp[6] + 1) >> 1;
predictor[3 * stride + 3] =
predictor[1 * stride + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
predictor[2 * stride + 3] =
predictor[0 * stride + 2] = (pp[6] + pp[7] + 1) >> 1;
predictor[1 * stride + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
predictor[0 * stride + 3] = (pp[7] + pp[8] + 1) >> 1;
dst[3 * dst_stride + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
dst[2 * dst_stride + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
dst[3 * dst_stride + 1] =
dst[1 * dst_stride + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
dst[2 * dst_stride + 1] =
dst[0 * dst_stride + 0] = (pp[4] + pp[5] + 1) >> 1;
dst[3 * dst_stride + 2] =
dst[1 * dst_stride + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
dst[2 * dst_stride + 2] =
dst[0 * dst_stride + 1] = (pp[5] + pp[6] + 1) >> 1;
dst[3 * dst_stride + 3] =
dst[1 * dst_stride + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
dst[2 * dst_stride + 3] =
dst[0 * dst_stride + 2] = (pp[6] + pp[7] + 1) >> 1;
dst[1 * dst_stride + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
dst[0 * dst_stride + 3] = (pp[7] + pp[8] + 1) >> 1;
}
break;
......@@ -217,22 +214,22 @@ void vp8_intra4x4_predict(BLOCKD *x,
unsigned char *pp = Above;
predictor[0 * stride + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[1 * stride + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[2 * stride + 0] =
predictor[0 * stride + 1] = (pp[1] + pp[2] + 1) >> 1;
predictor[1 * stride + 1] =
predictor[3 * stride + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * stride + 1] =
predictor[0 * stride + 2] = (pp[2] + pp[3] + 1) >> 1;
predictor[3 * stride + 1] =
predictor[1 * stride + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[0 * stride + 3] =
predictor[2 * stride + 2] = (pp[3] + pp[4] + 1) >> 1;
predictor[1 * stride + 3] =
predictor[3 * stride + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[2 * stride + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[3 * stride + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
dst[0 * dst_stride + 0] = (pp[0] + pp[1] + 1) >> 1;
dst[1 * dst_stride + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
dst[2 * dst_stride + 0] =
dst[0 * dst_stride + 1] = (pp[1] + pp[2] + 1) >> 1;
dst[1 * dst_stride + 1] =
dst[3 * dst_stride + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
dst[2 * dst_stride + 1] =
dst[0 * dst_stride + 2] = (pp[2] + pp[3] + 1) >> 1;
dst[3 * dst_stride + 1] =
dst[1 * dst_stride + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
dst[0 * dst_stride + 3] =
dst[2 * dst_stride + 2] = (pp[3] + pp[4] + 1) >> 1;
dst[1 * dst_stride + 3] =
dst[3 * dst_stride + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
dst[2 * dst_stride + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
dst[3 * dst_stride + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
}
break;
......@@ -250,22 +247,22 @@ void vp8_intra4x4_predict(BLOCKD *x,
pp[8] = Above[3];
predictor[3 * stride + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[3 * stride + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[2 * stride + 0] =
predictor[3 * stride + 2] = (pp[1] + pp[2] + 1) >> 1;
predictor[2 * stride + 1] =
predictor[3 * stride + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[2 * stride + 2] =
predictor[1 * stride + 0] = (pp[2] + pp[3] + 1) >> 1;
predictor[2 * stride + 3] =
predictor[1 * stride + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
predictor[1 * stride + 2] =
predictor[0 * stride + 0] = (pp[3] + pp[4] + 1) >> 1;
predictor[1 * stride + 3] =
predictor[0 * stride + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
predictor[0 * stride + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
predictor[0 * stride + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
dst[3 * dst_stride + 0] = (pp[0] + pp[1] + 1) >> 1;
dst[3 * dst_stride + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
dst[2 * dst_stride + 0] =
dst[3 * dst_stride + 2] = (pp[1] + pp[2] + 1) >> 1;
dst[2 * dst_stride + 1] =
dst[3 * dst_stride + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
dst[2 * dst_stride + 2] =
dst[1 * dst_stride + 0] = (pp[2] + pp[3] + 1) >> 1;
dst[2 * dst_stride + 3] =
dst[1 * dst_stride + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
dst[1 * dst_stride + 2] =
dst[0 * dst_stride + 0] = (pp[3] + pp[4] + 1) >> 1;
dst[1 * dst_stride + 3] =
dst[0 * dst_stride + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
dst[0 * dst_stride + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
dst[0 * dst_stride + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
}
break;
......@@ -273,22 +270,22 @@ void vp8_intra4x4_predict(BLOCKD *x,
case B_HU_PRED:
{
unsigned char *pp = Left;
predictor[0 * stride + 0] = (pp[0] + pp[1] + 1) >> 1;
predictor[0 * stride + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
predictor[0 * stride + 2] =
predictor[1 * stride + 0] = (pp[1] + pp[2] + 1) >> 1;
predictor[0 * stride + 3] =
predictor[1 * stride + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
predictor[1 * stride + 2] =
predictor[2 * stride + 0] = (pp[2] + pp[3] + 1) >> 1;
predictor[1 * stride + 3] =
predictor[2 * stride + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
predictor[2 * stride + 2] =
predictor[2 * stride + 3] =
predictor[3 * stride + 0] =
predictor[3 * stride + 1] =
predictor[3 * stride + 2] =
predictor[3 * stride + 3] = pp[3];
dst[0 * dst_stride + 0] = (pp[0] + pp[1] + 1) >> 1;
dst[0 * dst_stride + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
dst[0 * dst_stride + 2] =
dst[1 * dst_stride + 0] = (pp[1] + pp[2] + 1) >> 1;
dst[0 * dst_stride + 3] =
dst[1 * dst_stride + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
dst[1 * dst_stride + 2] =
dst[2 * dst_stride + 0] = (pp[2] + pp[3] + 1) >> 1;
dst[1 * dst_stride + 3] =
dst[2 * dst_stride + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
dst[2 * dst_stride + 2] =
dst[2 * dst_stride + 3] =
dst[3 * dst_stride + 0] =
dst[3 * dst_stride + 1] =
dst[3 * dst_stride + 2] =
dst[3 * dst_stride + 3] = pp[3];
}
break;
......@@ -316,5 +313,3 @@ void vp8_intra_prediction_down_copy(MACROBLOCKD *x)
*dst_ptr1 = *src_ptr;
*dst_ptr2 = *src_ptr;
}
......@@ -211,7 +211,8 @@ static void decode_macroblock(VP8D_COMP *pbi, MACROBLOCKD *xd,
int b_mode = xd->mode_info_context->bmi[i].as_mode;
RECON_INVOKE(RTCD_VTABLE(recon), intra4x4_predict)
(b, b_mode, *(b->base_dst) + b->dst, b->dst_stride);
( *(b->base_dst) + b->dst, b->dst_stride, b_mode,
*(b->base_dst) + b->dst, b->dst_stride );
if (xd->eobs[i] )
{
......
......@@ -64,7 +64,8 @@ void vp8_encode_intra4x4block(const VP8_ENCODER_RTCD *rtcd,
BLOCK *be = &x->block[ib];
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
(b, b->bmi.as_mode, b->predictor, 16);
(*(b->base_dst) + b->dst, b->dst_stride,
b->bmi.as_mode, b->predictor, 16);
ENCODEMB_INVOKE(&rtcd->encodemb, subb)(be, b, 16);
......
......@@ -157,7 +157,8 @@ static int pick_intra4x4block(
rate = mode_costs[mode];
RECON_INVOKE(&rtcd->common->recon, intra4x4_predict)
(b, mode, b->predictor, 16);
(*(b->base_dst) + b->dst, b->dst_stride,
mode, b->predictor, 16);
distortion = get_prediction_error(be, b, &rtcd->variance);
this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
......
......@@ -631,7 +631,8 @@ static int rd_pick_intra4x4block(
rate = bmode_costs[mode];
RECON_INVOKE(&cpi->rtcd.common->recon, intra4x4_predict)
(b, mode, b->predictor, 16);
(*(b->base_dst) + b->dst, b->dst_stride,
mode, b->predictor, 16);
ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
x->quantize_b(be, b);
......
......@@ -126,6 +126,7 @@ VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/armv6/idct_v6$(ASM)
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/armv6/loopfilter_v6$(ASM)
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/armv6/simpleloopfilter_v6$(ASM)
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/armv6/sixtappredict8x4_v6$(ASM)
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/armv6/intra4x4_predict_v6$(ASM)
# common (neon)
VP8_COMMON_SRCS-$(HAVE_ARMV7) += common/arm/neon/bilinearpredict4x4_neon$(ASM)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment