Commit 93b543ab authored by Urvang Joshi

Remove ALT_INTRA flag.

This experiment has been adopted as it has been cleared by Tapas.

Change-Id: I0682face60f62dd43091efa0a92d09d846396850
parent 9f262c5b
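Every kernel this commit deletes implements the TM ("true motion") predictor: each output pixel is left[r] + (above[c] - above[-1]), clamped to the 8-bit range. As a reading aid for the diffs below, here is a minimal scalar sketch of that rule (the function name is hypothetical, not an aom API):

#include <stddef.h>
#include <stdint.h>

/* Scalar model of the removed aom_tm_predictor_{4x4,8x8,16x16,32x32}
 * kernels: propagate the horizontal gradient (above[c] - above[-1]) down
 * each row, anchored at left[r], saturating to [0, 255] just as the NEON
 * versions do with vsubl.u8 / vadd.s16 / vqmovun.s16. */
static void tm_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bw,
                                int bh, const uint8_t *above,
                                const uint8_t *left) {
  const int top_left = above[-1];
  for (int r = 0; r < bh; ++r) {
    for (int c = 0; c < bw; ++c) {
      const int v = left[r] + above[c] - top_left;
      dst[c] = (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }
    dst += stride;
  }
}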
@@ -62,14 +62,9 @@ foreach $w (@tx_dims) {
   }
 }
 
-@pred_names = qw/dc dc_top dc_left dc_128 v h d207e d63e d45e d117 d135 d153/;
-if (aom_config("CONFIG_ALT_INTRA") eq "yes") {
-  push @pred_names, qw/paeth smooth/;
-  if (aom_config("CONFIG_SMOOTH_HV") eq "yes") {
-    push @pred_names, qw/smooth_v smooth_h/;
-  }
-} else {
-  push @pred_names, 'tm';
-}
+@pred_names = qw/dc dc_top dc_left dc_128 v h d207e d63e d45e d117 d135 d153 paeth smooth/;
+if (aom_config("CONFIG_SMOOTH_HV") eq "yes") {
+  push @pred_names, qw/smooth_v smooth_h/;
+}
 
 #
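With ALT_INTRA adopted, paeth and smooth now sit unconditionally in @pred_names and tm is gone; paeth is the mode that covers TM's ground. It starts from the same estimate left + above - top_left, but returns whichever neighbor is closest to that estimate instead of the clamped sum. A rough per-pixel sketch, assuming the usual Paeth tie-breaking (left first, then above; helper name hypothetical):

#include <stdint.h>
#include <stdlib.h>

/* Per-pixel Paeth rule: pick the neighbor nearest to the gradient
 * estimate base = left + above - top_left. */
static uint8_t paeth_pixel_sketch(uint8_t left, uint8_t above,
                                  uint8_t top_left) {
  const int base = left + above - top_left;
  const int p_left = abs(base - left);
  const int p_above = abs(base - above);
  const int p_top_left = abs(base - top_left);
  if (p_left <= p_above && p_left <= p_top_left) return left;
  return (p_above <= p_top_left) ? above : top_left;
}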
@@ -93,9 +88,6 @@ specialize qw/aom_h_predictor_4x4 neon dspr2 msa sse2/;
 specialize qw/aom_d135_predictor_4x4 neon/;
 specialize qw/aom_d153_predictor_4x4 ssse3/;
 specialize qw/aom_v_predictor_4x4 neon msa sse2/;
-if (aom_config("CONFIG_ALT_INTRA") eq "") {
-  specialize qw/aom_tm_predictor_4x4 neon dspr2 msa sse2/;
-} # CONFIG_ALT_INTRA
 specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/;
 specialize qw/aom_dc_top_predictor_4x4 msa neon sse2/;
 specialize qw/aom_dc_left_predictor_4x4 msa neon sse2/;
@@ -103,9 +95,6 @@ specialize qw/aom_dc_128_predictor_4x4 msa neon sse2/;
 specialize qw/aom_h_predictor_8x8 neon dspr2 msa sse2/;
 specialize qw/aom_d153_predictor_8x8 ssse3/;
 specialize qw/aom_v_predictor_8x8 neon msa sse2/;
-if (aom_config("CONFIG_ALT_INTRA") eq "") {
-  specialize qw/aom_tm_predictor_8x8 neon dspr2 msa sse2/;
-} # CONFIG_ALT_INTRA
 specialize qw/aom_dc_predictor_8x8 dspr2 neon msa sse2/;
 specialize qw/aom_dc_top_predictor_8x8 neon msa sse2/;
 specialize qw/aom_dc_left_predictor_8x8 neon msa sse2/;
@@ -113,9 +102,6 @@ specialize qw/aom_dc_128_predictor_8x8 neon msa sse2/;
 specialize qw/aom_h_predictor_16x16 neon dspr2 msa sse2/;
 specialize qw/aom_d153_predictor_16x16 ssse3/;
 specialize qw/aom_v_predictor_16x16 neon msa sse2/;
-if (aom_config("CONFIG_ALT_INTRA") eq "") {
-  specialize qw/aom_tm_predictor_16x16 neon msa sse2/;
-} # CONFIG_ALT_INTRA
 specialize qw/aom_dc_predictor_16x16 dspr2 neon msa sse2/;
 specialize qw/aom_dc_top_predictor_16x16 neon msa sse2/;
 specialize qw/aom_dc_left_predictor_16x16 neon msa sse2/;
@@ -123,9 +109,6 @@ specialize qw/aom_dc_128_predictor_16x16 neon msa sse2/;
 specialize qw/aom_h_predictor_32x32 neon msa sse2/;
 specialize qw/aom_d153_predictor_32x32 ssse3/;
 specialize qw/aom_v_predictor_32x32 neon msa sse2/;
-if (aom_config("CONFIG_ALT_INTRA") eq "") {
-  specialize qw/aom_tm_predictor_32x32 neon msa sse2/;
-} # CONFIG_ALT_INTRA
 specialize qw/aom_dc_predictor_32x32 msa neon sse2/;
 specialize qw/aom_dc_top_predictor_32x32 msa neon sse2/;
 specialize qw/aom_dc_left_predictor_32x32 msa neon sse2/;
@@ -133,24 +116,12 @@ specialize qw/aom_dc_128_predictor_32x32 msa neon sse2/;
 
 if (aom_config("CONFIG_HIGHBITDEPTH") eq "yes") {
   specialize qw/aom_highbd_v_predictor_4x4 sse2/;
-  if (aom_config("CONFIG_ALT_INTRA") eq "") {
-    specialize qw/aom_highbd_tm_predictor_4x4 sse2/;
-  } # CONFIG_ALT_INTRA
   specialize qw/aom_highbd_dc_predictor_4x4 sse2/;
   specialize qw/aom_highbd_v_predictor_8x8 sse2/;
-  if (aom_config("CONFIG_ALT_INTRA") eq "") {
-    specialize qw/aom_highbd_tm_predictor_8x8 sse2/;
-  } # CONFIG_ALT_INTRA
   specialize qw/aom_highbd_dc_predictor_8x8 sse2/;;
   specialize qw/aom_highbd_v_predictor_16x16 sse2/;
-  if (aom_config("CONFIG_ALT_INTRA") eq "") {
-    specialize qw/aom_highbd_tm_predictor_16x16 sse2/;
-  } # CONFIG_ALT_INTRA
   specialize qw/aom_highbd_dc_predictor_16x16 sse2/;
   specialize qw/aom_highbd_v_predictor_32x32 sse2/;
-  if (aom_config("CONFIG_ALT_INTRA") eq "") {
-    specialize qw/aom_highbd_tm_predictor_32x32 sse2/;
-  } # CONFIG_ALT_INTRA
   specialize qw/aom_highbd_dc_predictor_32x32 sse2/;
 } # CONFIG_HIGHBITDEPTH
 
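Each specialize line above feeds the RTCD (run-time CPU detection) generator: it declares which SIMD flavors of a predictor exist, and the generated code binds a function pointer to the best available version at startup. Dropping the tm blocks therefore also drops the tm entries from that dispatch. The generated code is roughly shaped like this sketch (illustrative only; the real header produced from this file differs in detail):

#include <stddef.h>
#include <stdint.h>

/* Illustrative shape of RTCD output for a line such as
 * "specialize qw/aom_dc_predictor_4x4 dspr2 msa neon sse2/" on x86. */
void aom_dc_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *above, const uint8_t *left);
void aom_dc_predictor_4x4_sse2(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left);

/* Callers always go through this pointer. */
void (*aom_dc_predictor_4x4)(uint8_t *dst, ptrdiff_t stride,
                             const uint8_t *above, const uint8_t *left);

static void setup_rtcd_sketch(int cpu_has_sse2) {
  aom_dc_predictor_4x4 = aom_dc_predictor_4x4_c; /* portable default */
  if (cpu_has_sse2) aom_dc_predictor_4x4 = aom_dc_predictor_4x4_sse2;
}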
@@ -529,229 +529,4 @@ void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
     }
   }
 }
-
-void aom_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
-                               const uint8_t *above, const uint8_t *left) {
-  int i;
-  uint16x8_t q1u16, q3u16;
-  int16x8_t q1s16;
-  uint8x8_t d0u8 = vdup_n_u8(0);
-  uint32x2_t d2u32 = vdup_n_u32(0);
-
-  d0u8 = vld1_dup_u8(above - 1);
-  d2u32 = vld1_lane_u32((const uint32_t *)above, d2u32, 0);
-
-  q3u16 = vsubl_u8(vreinterpret_u8_u32(d2u32), d0u8);
-  for (i = 0; i < 4; i++, dst += stride) {
-    q1u16 = vdupq_n_u16((uint16_t)left[i]);
-    q1s16 =
-        vaddq_s16(vreinterpretq_s16_u16(q1u16), vreinterpretq_s16_u16(q3u16));
-    d0u8 = vqmovun_s16(q1s16);
-    vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
-  }
-}
-
-void aom_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
-                               const uint8_t *above, const uint8_t *left) {
-  int j;
-  uint16x8_t q0u16, q3u16, q10u16;
-  int16x8_t q0s16;
-  uint16x4_t d20u16;
-  uint8x8_t d0u8, d2u8, d30u8;
-
-  d0u8 = vld1_dup_u8(above - 1);
-  d30u8 = vld1_u8(left);
-  d2u8 = vld1_u8(above);
-  q10u16 = vmovl_u8(d30u8);
-  q3u16 = vsubl_u8(d2u8, d0u8);
-  d20u16 = vget_low_u16(q10u16);
-  for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
-    q0u16 = vdupq_lane_u16(d20u16, 0);
-    q0s16 =
-        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
-    d0u8 = vqmovun_s16(q0s16);
-    vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += stride;
-    q0u16 = vdupq_lane_u16(d20u16, 1);
-    q0s16 =
-        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
-    d0u8 = vqmovun_s16(q0s16);
-    vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += stride;
-    q0u16 = vdupq_lane_u16(d20u16, 2);
-    q0s16 =
-        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
-    d0u8 = vqmovun_s16(q0s16);
-    vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += stride;
-    q0u16 = vdupq_lane_u16(d20u16, 3);
-    q0s16 =
-        vaddq_s16(vreinterpretq_s16_u16(q3u16), vreinterpretq_s16_u16(q0u16));
-    d0u8 = vqmovun_s16(q0s16);
-    vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d0u8));
-    dst += stride;
-  }
-}
-
-void aom_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
-                                 const uint8_t *above, const uint8_t *left) {
-  int j, k;
-  uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16;
-  uint8x16_t q0u8, q1u8;
-  int16x8_t q0s16, q1s16, q8s16, q11s16;
-  uint16x4_t d20u16;
-  uint8x8_t d2u8, d3u8, d18u8, d22u8, d23u8;
-
-  q0u8 = vld1q_dup_u8(above - 1);
-  q1u8 = vld1q_u8(above);
-  q2u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
-  q3u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
-  for (k = 0; k < 2; k++, left += 8) {
-    d18u8 = vld1_u8(left);
-    q10u16 = vmovl_u8(d18u8);
-    d20u16 = vget_low_u16(q10u16);
-    for (j = 0; j < 2; j++, d20u16 = vget_high_u16(q10u16)) {
-      q0u16 = vdupq_lane_u16(d20u16, 0);
-      q8u16 = vdupq_lane_u16(d20u16, 1);
-      q1s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q2u16));
-      q0s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q3u16));
-      q11s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q2u16));
-      q8s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q3u16));
-      d2u8 = vqmovun_s16(q1s16);
-      d3u8 = vqmovun_s16(q0s16);
-      d22u8 = vqmovun_s16(q11s16);
-      d23u8 = vqmovun_s16(q8s16);
-      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
-      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
-      dst += stride;
-      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
-      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
-      dst += stride;
-      q0u16 = vdupq_lane_u16(d20u16, 2);
-      q8u16 = vdupq_lane_u16(d20u16, 3);
-      q1s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q2u16));
-      q0s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q3u16));
-      q11s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q2u16));
-      q8s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q8u16), vreinterpretq_s16_u16(q3u16));
-      d2u8 = vqmovun_s16(q1s16);
-      d3u8 = vqmovun_s16(q0s16);
-      d22u8 = vqmovun_s16(q11s16);
-      d23u8 = vqmovun_s16(q8s16);
-      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d2u8));
-      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d3u8));
-      dst += stride;
-      vst1_u64((uint64_t *)dst, vreinterpret_u64_u8(d22u8));
-      vst1_u64((uint64_t *)(dst + 8), vreinterpret_u64_u8(d23u8));
-      dst += stride;
-    }
-  }
-}
-
-void aom_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
-                                 const uint8_t *above, const uint8_t *left) {
-  int j, k;
-  uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16;
-  uint8x16_t q0u8, q1u8, q2u8;
-  int16x8_t q12s16, q13s16, q14s16, q15s16;
-  uint16x4_t d6u16;
-  uint8x8_t d0u8, d1u8, d2u8, d3u8, d26u8;
-
-  q0u8 = vld1q_dup_u8(above - 1);
-  q1u8 = vld1q_u8(above);
-  q2u8 = vld1q_u8(above + 16);
-  q8u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q0u8));
-  q9u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q0u8));
-  q10u16 = vsubl_u8(vget_low_u8(q2u8), vget_low_u8(q0u8));
-  q11u16 = vsubl_u8(vget_high_u8(q2u8), vget_high_u8(q0u8));
-  for (k = 0; k < 4; k++, left += 8) {
-    d26u8 = vld1_u8(left);
-    q3u16 = vmovl_u8(d26u8);
-    d6u16 = vget_low_u16(q3u16);
-    for (j = 0; j < 2; j++, d6u16 = vget_high_u16(q3u16)) {
-      q0u16 = vdupq_lane_u16(d6u16, 0);
-      q12s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
-      q13s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
-      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q10u16));
-      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q11u16));
-      d0u8 = vqmovun_s16(q12s16);
-      d1u8 = vqmovun_s16(q13s16);
-      d2u8 = vqmovun_s16(q14s16);
-      d3u8 = vqmovun_s16(q15s16);
-      q0u8 = vcombine_u8(d0u8, d1u8);
-      q1u8 = vcombine_u8(d2u8, d3u8);
-      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
-      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += stride;
-      q0u16 = vdupq_lane_u16(d6u16, 1);
-      q12s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
-      q13s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
-      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q10u16));
-      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q11u16));
-      d0u8 = vqmovun_s16(q12s16);
-      d1u8 = vqmovun_s16(q13s16);
-      d2u8 = vqmovun_s16(q14s16);
-      d3u8 = vqmovun_s16(q15s16);
-      q0u8 = vcombine_u8(d0u8, d1u8);
-      q1u8 = vcombine_u8(d2u8, d3u8);
-      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
-      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += stride;
-      q0u16 = vdupq_lane_u16(d6u16, 2);
-      q12s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
-      q13s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
-      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q10u16));
-      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q11u16));
-      d0u8 = vqmovun_s16(q12s16);
-      d1u8 = vqmovun_s16(q13s16);
-      d2u8 = vqmovun_s16(q14s16);
-      d3u8 = vqmovun_s16(q15s16);
-      q0u8 = vcombine_u8(d0u8, d1u8);
-      q1u8 = vcombine_u8(d2u8, d3u8);
-      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
-      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += stride;
-      q0u16 = vdupq_lane_u16(d6u16, 3);
-      q12s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q8u16));
-      q13s16 =
-          vaddq_s16(vreinterpretq_s16_u16(q0u16), vreinterpretq_s16_u16(q9u16));
-      q14s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q10u16));
-      q15s16 = vaddq_s16(vreinterpretq_s16_u16(q0u16),
-                         vreinterpretq_s16_u16(q11u16));
-      d0u8 = vqmovun_s16(q12s16);
-      d1u8 = vqmovun_s16(q13s16);
-      d2u8 = vqmovun_s16(q14s16);
-      d3u8 = vqmovun_s16(q15s16);
-      q0u8 = vcombine_u8(d0u8, d1u8);
-      q1u8 = vcombine_u8(d2u8, d3u8);
-      vst1q_u64((uint64_t *)dst, vreinterpretq_u64_u8(q0u8));
-      vst1q_u64((uint64_t *)(dst + 16), vreinterpretq_u64_u8(q1u8));
-      dst += stride;
-    }
-  }
-}
-
 #endif  // !HAVE_NEON_ASM
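The assembly file below removes the same four kernels again (it is the variant used when HAVE_NEON_ASM is set). Both variants lean on one range argument: above[c] - above[-1] lies in [-255, 255], and adding left[r] keeps every intermediate within [-255, 510], so widened 16-bit signed math never overflows and a single vqmovun.s16 per vector supplies the final clamp. A scalar model of that narrowing step (name hypothetical):

#include <stdint.h>

/* Scalar equivalent of NEON vqmovun.s16: narrow a signed 16-bit value to
 * uint8_t with unsigned saturation, i.e. clamp to [0, 255]. */
static uint8_t qmovun_s16_sketch(int16_t v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}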
@@ -19,10 +19,6 @@
     EXPORT  |aom_h_predictor_8x8_neon|
     EXPORT  |aom_h_predictor_16x16_neon|
     EXPORT  |aom_h_predictor_32x32_neon|
-    EXPORT  |aom_tm_predictor_4x4_neon|
-    EXPORT  |aom_tm_predictor_8x8_neon|
-    EXPORT  |aom_tm_predictor_16x16_neon|
-    EXPORT  |aom_tm_predictor_32x32_neon|
     ARM
     REQUIRE8
     PRESERVE8
@@ -289,345 +285,3 @@ loop_h
     bgt         loop_h
     bx          lr
     ENDP                ; |aom_h_predictor_32x32_neon|
-
-;void aom_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|aom_tm_predictor_4x4_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub         r12, r2, #1
-    vld1.u8     {d0[]}, [r12]
-
-    ; Load above 4 pixels
-    vld1.32     {d2[0]}, [r2]
-
-    ; Compute above - ytop_left
-    vsubl.u8    q3, d2, d0
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; 1st row and 2nd row
-    vld1.u8     {d2[]}, [r3]!
-    vld1.u8     {d4[]}, [r3]!
-    vmovl.u8    q1, d2
-    vmovl.u8    q2, d4
-    vadd.s16    q1, q1, q3
-    vadd.s16    q2, q2, q3
-    vqmovun.s16 d0, q1
-    vqmovun.s16 d1, q2
-    vst1.32     {d0[0]}, [r0], r1
-    vst1.32     {d1[0]}, [r0], r1
-
-    ; 3rd row and 4th row
-    vld1.u8     {d2[]}, [r3]!
-    vld1.u8     {d4[]}, [r3]
-    vmovl.u8    q1, d2
-    vmovl.u8    q2, d4
-    vadd.s16    q1, q1, q3
-    vadd.s16    q2, q2, q3
-    vqmovun.s16 d0, q1
-    vqmovun.s16 d1, q2
-    vst1.32     {d0[0]}, [r0], r1
-    vst1.32     {d1[0]}, [r0], r1
-    bx          lr
-    ENDP                ; |aom_tm_predictor_4x4_neon|
-
-;void aom_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                const uint8_t *above,
-;                                const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|aom_tm_predictor_8x8_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub         r12, r2, #1
-    vld1.8      {d0[]}, [r12]
-
-    ; preload 8 left
-    vld1.8      {d30}, [r3]
-
-    ; Load above 8 pixels
-    vld1.64     {d2}, [r2]
-    vmovl.u8    q10, d30
-
-    ; Compute above - ytop_left
-    vsubl.u8    q3, d2, d0
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; 1st row and 2nd row
-    vdup.16     q0, d20[0]
-    vdup.16     q1, d20[1]
-    vadd.s16    q0, q3, q0
-    vadd.s16    q1, q3, q1
-
-    ; 3rd row and 4th row
-    vdup.16     q8, d20[2]
-    vdup.16     q9, d20[3]
-    vadd.s16    q8, q3, q8
-    vadd.s16    q9, q3, q9
-
-    vqmovun.s16 d0, q0
-    vqmovun.s16 d1, q1
-    vqmovun.s16 d2, q8
-    vqmovun.s16 d3, q9
-
-    vst1.64     {d0}, [r0], r1
-    vst1.64     {d1}, [r0], r1
-    vst1.64     {d2}, [r0], r1
-    vst1.64     {d3}, [r0], r1
-
-    ; 5th row and 6th row
-    vdup.16     q0, d21[0]
-    vdup.16     q1, d21[1]
-    vadd.s16    q0, q3, q0
-    vadd.s16    q1, q3, q1
-
-    ; 7th row and 8th row
-    vdup.16     q8, d21[2]
-    vdup.16     q9, d21[3]
-    vadd.s16    q8, q3, q8
-    vadd.s16    q9, q3, q9
-
-    vqmovun.s16 d0, q0
-    vqmovun.s16 d1, q1
-    vqmovun.s16 d2, q8
-    vqmovun.s16 d3, q9
-
-    vst1.64     {d0}, [r0], r1
-    vst1.64     {d1}, [r0], r1
-    vst1.64     {d2}, [r0], r1
-    vst1.64     {d3}, [r0], r1
-    bx          lr
-    ENDP                ; |aom_tm_predictor_8x8_neon|
-
-;void aom_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                  const uint8_t *above,
-;                                  const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|aom_tm_predictor_16x16_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub         r12, r2, #1
-    vld1.8      {d0[]}, [r12]
-
-    ; Load above 8 pixels
-    vld1.8      {q1}, [r2]
-
-    ; preload 8 left into r12
-    vld1.8      {d18}, [r3]!
-
-    ; Compute above - ytop_left
-    vsubl.u8    q2, d2, d0
-    vsubl.u8    q3, d3, d0
-
-    vmovl.u8    q10, d18
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; Process 8 rows in each single loop and loop 2 times to process 16 rows.
-    mov         r2, #2
-
-loop_16x16_neon
-    ; Process two rows.
-    vdup.16     q0, d20[0]
-    vdup.16     q8, d20[1]
-    vadd.s16    q1, q0, q2
-    vadd.s16    q0, q0, q3
-    vadd.s16    q11, q8, q2
-    vadd.s16    q8, q8, q3
-    vqmovun.s16 d2, q1
-    vqmovun.s16 d3, q0
-    vqmovun.s16 d22, q11
-    vqmovun.s16 d23, q8
-    vdup.16     q0, d20[2]                  ; proload next 2 rows data
-    vdup.16     q8, d20[3]
-    vst1.64     {d2,d3}, [r0], r1
-    vst1.64     {d22,d23}, [r0], r1
-
-    ; Process two rows.
-    vadd.s16    q1, q0, q2
-    vadd.s16    q0, q0, q3
-    vadd.s16    q11, q8, q2
-    vadd.s16    q8, q8, q3
-    vqmovun.s16 d2, q1
-    vqmovun.s16 d3, q0
-    vqmovun.s16 d22, q11
-    vqmovun.s16 d23, q8
-    vdup.16     q0, d21[0]                  ; proload next 2 rows data
-    vdup.16     q8, d21[1]
-    vst1.64     {d2,d3}, [r0], r1
-    vst1.64     {d22,d23}, [r0], r1
-
-    vadd.s16    q1, q0, q2
-    vadd.s16    q0, q0, q3
-    vadd.s16    q11, q8, q2
-    vadd.s16    q8, q8, q3
-    vqmovun.s16 d2, q1
-    vqmovun.s16 d3, q0
-    vqmovun.s16 d22, q11
-    vqmovun.s16 d23, q8
-    vdup.16     q0, d21[2]                  ; proload next 2 rows data
-    vdup.16     q8, d21[3]
-    vst1.64     {d2,d3}, [r0], r1
-    vst1.64     {d22,d23}, [r0], r1
-
-    vadd.s16    q1, q0, q2
-    vadd.s16    q0, q0, q3
-    vadd.s16    q11, q8, q2
-    vadd.s16    q8, q8, q3
-    vqmovun.s16 d2, q1
-    vqmovun.s16 d3, q0
-    vqmovun.s16 d22, q11
-    vqmovun.s16 d23, q8
-    vld1.8      {d18}, [r3]!                ; preload 8 left into r12
-    vmovl.u8    q10, d18
-    vst1.64     {d2,d3}, [r0], r1
-    vst1.64     {d22,d23}, [r0], r1
-
-    subs        r2, r2, #1
-    bgt         loop_16x16_neon
-
-    bx          lr
-    ENDP                ; |aom_tm_predictor_16x16_neon|
-
-;void aom_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride,
-;                                  const uint8_t *above,
-;                                  const uint8_t *left)
-; r0  uint8_t *dst
-; r1  ptrdiff_t y_stride
-; r2  const uint8_t *above
-; r3  const uint8_t *left
-
-|aom_tm_predictor_32x32_neon| PROC
-    ; Load ytop_left = above[-1];
-    sub         r12, r2, #1
-    vld1.8      {d0[]}, [r12]
-
-    ; Load above 32 pixels
-    vld1.8      {q1}, [r2]!
-    vld1.8      {q2}, [r2]
-
-    ; preload 8 left pixels
-    vld1.8      {d26}, [r3]!
-
-    ; Compute above - ytop_left
-    vsubl.u8    q8, d2, d0
-    vsubl.u8    q9, d3, d0
-    vsubl.u8    q10, d4, d0
-    vsubl.u8    q11, d5, d0
-
-    vmovl.u8    q3, d26
-
-    ; Load left row by row and compute left + (above - ytop_left)
-    ; Process 8 rows in each single loop and loop 4 times to process 32 rows.
-    mov         r2, #4
-
-loop_32x32_neon
-    ; Process two rows.
-    vdup.16     q0, d6[0]
-    vdup.16     q2, d6[1]
-    vadd.s16    q12, q0, q8
-    vadd.s16    q13, q0, q9
-    vadd.s16    q14, q0, q10
-    vadd.s16    q15, q0, q11
-    vqmovun.s16 d0, q12
-    vqmovun.s16 d1, q13
-    vadd.s16    q12, q2, q8
-    vadd.s16    q13, q2, q9
-    vqmovun.s16 d2, q14
-    vqmovun.s16 d3, q15
-    vadd.s16    q14, q2, q10
-    vadd.s16    q15, q2, q11
-    vst1.64     {d0-d3}, [r0], r1
-    vqmovun.s16 d24, q12
-    vqmovun.s16 d25, q13
-    vqmovun.s16 d26, q14
-    vqmovun.s16 d27, q15
-    vdup.16     q1, d6[2]
-    vdup.16     q2, d6[3]
-    vst1.64     {d24-d27}, [r0], r1
-
-    ; Process two rows.
-    vadd.s16    q12, q1, q8
-    vadd.s16    q13, q1, q9
-    vadd.s16    q14, q1, q10
-    vadd.s16    q15, q1, q11
-    vqmovun.s16 d0, q12
-    vqmovun.s16 d1, q13
-    vadd.s16    q12, q2, q8
-    vadd.s16    q13, q2, q9
-    vqmovun.s16 d2, q14
-    vqmovun.s16 d3, q15
-    vadd.s16    q14, q2, q10
-    vadd.s16    q15, q2, q11
-    vst1.64     {d0-d3}, [r0], r1
-    vqmovun.s16 d24, q12
-    vqmovun.s16 d25, q13
-    vqmovun.s16 d26, q14
-    vqmovun.s16 d27, q15
-    vdup.16     q0, d7[0]
-    vdup.16     q2, d7[1]
-    vst1.64     {d24-d27}, [r0], r1