Commit 53f64a77 authored by John Koleszar, committed by Code Review

Merge "arm: move unrolled loops back to generic code"

parents 9fdd90c9 19638c23
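
For orientation, a minimal sketch (assembled from the hunks below, not itself part of the commit) of the pattern this merge consolidates: ARM builds keep the manually unrolled calls under an ARCH_ARM guard in the generic file, while other targets use the plain loop.

void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
#if ARCH_ARM
    /* unrolled: four recon4 calls covering the 16 luma blocks */
    BLOCKD *b = &x->block[0];
    RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
    b += 4;
    RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
    b += 4;
    RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
    b += 4;
    RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
#else
    int i;
    /* generic: the same work expressed as a loop over blocks 0, 4, 8, 12 */
    for (i = 0; i < 16; i += 4)
    {
        BLOCKD *b = &x->block[i];
        RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
    }
#endif
}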
@@ -15,36 +15,6 @@
extern void vp8_recon16x16mb_neon(unsigned char *pred_ptr, short *diff_ptr, unsigned char *dst_ptr, int ystride, unsigned char *udst_ptr, unsigned char *vdst_ptr);
/*
void vp8_recon16x16mby(MACROBLOCKD *x)
{
int i;
for(i=0;i<16;i+=4)
{
//vp8_recon4b(&x->block[i]);
BLOCKD *b = &x->block[i];
vp8_recon4b (b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
}
*/
void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
BLOCKD *b = &x->block[0];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
//b = &x->block[4];
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
//b = &x->block[8];
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
//b = &x->block[12];
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#if HAVE_ARMV7
void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
@@ -58,52 +28,4 @@ void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
vp8_recon16x16mb_neon(pred_ptr, diff_ptr, dst_ptr, ystride, udst_ptr, vdst_ptr);
}
#else
/*
void vp8_recon16x16mb(MACROBLOCKD *x)
{
int i;
for(i=0;i<16;i+=4)
{
// vp8_recon4b(&x->block[i]);
BLOCKD *b = &x->block[i];
vp8_recon4b (b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
for(i=16;i<24;i+=2)
{
// vp8_recon2b(&x->block[i]);
BLOCKD *b = &x->block[i];
vp8_recon2b (b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
}
*/
void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
BLOCKD *b = &x->block[0];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
//b = &x->block[16];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#endif
@@ -108,6 +108,22 @@ void vp8_recon2b_c
void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
#if ARCH_ARM
BLOCKD *b = &x->block[0];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
//b = &x->block[4];
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
//b = &x->block[8];
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
//b = &x->block[12];
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
#else
int i;
for (i = 0; i < 16; i += 4)
@@ -116,10 +132,36 @@ void vp8_recon16x16mby(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#endif
}
void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
#if ARCH_ARM
BLOCKD *b = &x->block[0];
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
RECON_INVOKE(rtcd, recon4)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 4;
//b = &x->block[16];
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b++;
b++;
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
#else
int i;
for (i = 0; i < 16; i += 4)
@@ -135,4 +177,5 @@ void vp8_recon16x16mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#endif
}
@@ -318,6 +318,74 @@ void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
vp8_intra_prediction_down_copy(x);
#if ARCH_ARM
{
BLOCKD *b = &x->block[0];
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
b += 1;
vp8_predict_intra4x4(b, b->bmi.mode, b->predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#else
for (i = 0; i < 16; i++)
{
BLOCKD *b = &x->block[i];
@@ -325,6 +393,7 @@ void vp8_recon_intra4x4mb(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
vp8_predict_intra4x4(b, x->block[i].bmi.mode, x->block[i].predictor);
RECON_INVOKE(rtcd, recon)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
}
#endif
vp8_recon_intra_mbuv(rtcd, x);
@@ -119,7 +119,6 @@ VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/bilinearfilter_arm.c
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/filter_arm.c
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/loopfilter_arm.c
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/recon_arm.c
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/reconintra4x4_arm.c
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/reconintra_arm.c
VP8_COMMON_SRCS-$(HAVE_ARMV6) += common/arm/vpx_asm_offsets.c