Skip to content
GitLab
Menu
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
Yushin Cho
aom-rav1e
Commits
327b138b
Commit
327b138b
authored
Apr 02, 2015
by
Johann
Committed by
Gerrit Code Review
Apr 02, 2015
Browse files
Merge "Remove PPC build support"
parents
6eb05c9e
bc98e93b
Changes
19
Expand all
Hide whitespace changes
Inline
Side-by-side
README
View file @
327b138b
...
...
@@ -62,12 +62,6 @@ COMPILING THE APPLICATIONS/LIBRARIES:
armv7s-darwin-gcc
mips32-linux-gcc
mips64-linux-gcc
ppc32-darwin8-gcc
ppc32-darwin9-gcc
ppc32-linux-gcc
ppc64-darwin8-gcc
ppc64-darwin9-gcc
ppc64-linux-gcc
sparc-solaris-gcc
x86-android-gcc
x86-darwin8-gcc
...
...
build/make/configure.sh
View file @
327b138b
...
...
@@ -640,12 +640,6 @@ process_common_toolchain() {
*
i[3456]86
*
)
tgt_isa
=
x86
;;
*
powerpc64
*
)
tgt_isa
=
ppc64
;;
*
powerpc
*
)
tgt_isa
=
ppc32
;;
*
sparc
*
)
tgt_isa
=
sparc
;;
...
...
@@ -1070,29 +1064,6 @@ EOF
check_add_asflags
-march
=
${
tgt_isa
}
check_add_asflags
-KPIC
;;
ppc
*
)
enable_feature ppc
bits
=
${
tgt_isa
##ppc
}
link_with_cc
=
gcc
setup_gnu_toolchain
add_asflags
-force_cpusubtype_ALL
-I
"
\$
(dir
\$
<)darwin"
soft_enable altivec
enabled altivec
&&
add_cflags
-maltivec
case
"
$tgt_os
"
in
linux
*
)
add_asflags
-maltivec
-mregnames
-I
"
\$
(dir
\$
<)linux"
;;
darwin
*
)
darwin_arch
=
"-arch ppc"
enabled ppc64
&&
darwin_arch
=
"
${
darwin_arch
}
64"
add_cflags
${
darwin_arch
}
-m
${
bits
}
-fasm-blocks
add_asflags
${
darwin_arch
}
-force_cpusubtype_ALL
-I
"
\$
(dir
\$
<)darwin"
add_ldflags
${
darwin_arch
}
-m
${
bits
}
enabled altivec
&&
add_cflags
-faltivec
;;
esac
;;
x86
*
)
case
${
tgt_os
}
in
win
*
)
...
...
configure
View file @
327b138b
...
...
@@ -112,12 +112,6 @@ all_platforms="${all_platforms} armv7-win32-vs12"
all_platforms
=
"
${
all_platforms
}
armv7s-darwin-gcc"
all_platforms
=
"
${
all_platforms
}
mips32-linux-gcc"
all_platforms
=
"
${
all_platforms
}
mips64-linux-gcc"
all_platforms
=
"
${
all_platforms
}
ppc32-darwin8-gcc"
all_platforms
=
"
${
all_platforms
}
ppc32-darwin9-gcc"
all_platforms
=
"
${
all_platforms
}
ppc32-linux-gcc"
all_platforms
=
"
${
all_platforms
}
ppc64-darwin8-gcc"
all_platforms
=
"
${
all_platforms
}
ppc64-darwin9-gcc"
all_platforms
=
"
${
all_platforms
}
ppc64-linux-gcc"
all_platforms
=
"
${
all_platforms
}
sparc-solaris-gcc"
all_platforms
=
"
${
all_platforms
}
x86-android-gcc"
all_platforms
=
"
${
all_platforms
}
x86-darwin8-gcc"
...
...
@@ -247,8 +241,6 @@ ARCH_LIST="
mips
x86
x86_64
ppc32
ppc64
"
ARCH_EXT_LIST
=
"
edsp
...
...
@@ -621,12 +613,6 @@ process_toolchain() {
universal-darwin
*
)
darwin_ver
=
${
tgt_os
##darwin
}
# Snow Leopard (10.6/darwin10) dropped support for PPC
# Include PPC support for all prior versions
if
[
$darwin_ver
-lt
10
]
;
then
fat_bin_archs
=
"
$fat_bin_archs
ppc32-
${
tgt_os
}
-gcc"
fi
# Tiger (10.4/darwin8) brought support for x86
if
[
$darwin_ver
-ge
8
]
;
then
fat_bin_archs
=
"
$fat_bin_archs
x86-
${
tgt_os
}
-
${
tgt_cc
}
"
...
...
@@ -727,7 +713,7 @@ process_toolchain() {
esac
# Other toolchain specific defaults
case
$toolchain
in
x86
*
|
ppc
*
|
universal
*
)
soft_enable postproc
;;
esac
case
$toolchain
in
x86
*
|
universal
*
)
soft_enable postproc
;;
esac
if
enabled postproc_visualizer
;
then
enabled postproc
||
die
"postproc_visualizer requires postproc to be enabled"
...
...
vp8/common/ppc/copy_altivec.asm
deleted
100644 → 0
View file @
6eb05c9e
;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
.globl
copy_mem16x16_ppc
;# r3 unsigned char *src
;# r4 int src_stride
;# r5 unsigned char *dst
;# r6 int dst_stride
;# Make the assumption that input will not be aligned,
;# but the output will be. So two reads and a perm
;# for the input, but only one store for the output.
copy_mem16x16_ppc:
mfspr
r11
,
256
;# get old VRSAVE
oris
r12
,
r11
,
0xe000
mtspr
256
,
r12
;# set VRSAVE
li
r10
,
16
mtctr
r10
cp_16x16_loop:
lvsl
v0
,
0
,
r3
;# permutate value for alignment
lvx
v1
,
0
,
r3
lvx
v2
,
r10
,
r3
vperm
v1
,
v1
,
v2
,
v0
stvx
v1
,
0
,
r5
add
r3
,
r3
,
r4
;# increment source pointer
add
r5
,
r5
,
r6
;# increment destination pointer
bdnz
cp_16x16_loop
mtspr
256
,
r11
;# reset old VRSAVE
blr
vp8/common/ppc/filter_altivec.asm
deleted
100644 → 0
View file @
6eb05c9e
This diff is collapsed.
Click to expand it.
vp8/common/ppc/filter_bilinear_altivec.asm
deleted
100644 → 0
View file @
6eb05c9e
;
; Copyright (c) 2010 The WebM project authors. All Rights Reserved.
;
; Use of this source code is governed by a BSD-style license
; that can be found in the LICENSE file in the root of the source
; tree. An additional intellectual property rights grant can be found
; in the file PATENTS. All contributing project authors may
; be found in the AUTHORS file in the root of the source tree.
;
.globl
bilinear_predict4x4_ppc
.globl
bilinear_predict8x4_ppc
.globl
bilinear_predict8x8_ppc
.globl
bilinear_predict16x16_ppc
.macro
load_c
V
,
LABEL
,
OFF
,
R0
,
R1
lis
\
R0
,
\
LABEL@ha
la
\
R1
,
\
LABEL@l
(
\
R0
)
lvx
\
V
,
\
OFF
,
\
R1
.endm
.macro
load_vfilter
V0
,
V1
load_c
\
V0
,
vfilter_b
,
r6
,
r9
,
r10
addi
r6
,
r6
,
16
lvx
\
V1
,
r6
,
r10
.endm
.macro
HProlog
jump_label
;# load up horizontal filter
slwi.
r5
,
r5
,
4
;# index into horizontal filter array
;# index to the next set of vectors in the row.
li
r10
,
16
li
r12
,
32
;# downshift by 7 ( divide by 128 ) at the end
vspltish
v19
,
7
;# If there isn't any filtering to be done for the horizontal, then
;# just skip to the second pass.
beq
\
jump_label
load_c
v20
,
hfilter_b
,
r5
,
r9
,
r0
;# setup constants
;# v14 permutation value for alignment
load_c
v28
,
b_hperm_b
,
0
,
r9
,
r0
;# rounding added in on the multiply
vspltisw
v21
,
8
vspltisw
v18
,
3
vslw
v18
,
v21
,
v18
;# 0x00000040000000400000004000000040
slwi.
r6
,
r6
,
5
;# index into vertical filter array
.endm
;# Filters a horizontal line
;# expects:
;# r3 src_ptr
;# r4 pitch
;# r10 16
;# r12 32
;# v17 perm intput
;# v18 rounding
;# v19 shift
;# v20 filter taps
;# v21 tmp
;# v22 tmp
;# v23 tmp
;# v24 tmp
;# v25 tmp
;# v26 tmp
;# v27 tmp
;# v28 perm output
;#
.macro
HFilter
V
vperm
v24
,
v21
,
v21
,
v10
;# v20 = 0123 1234 2345 3456
vperm
v25
,
v21
,
v21
,
v11
;# v21 = 4567 5678 6789 789A
vmsummbm
v24
,
v20
,
v24
,
v18
vmsummbm
v25
,
v20
,
v25
,
v18
vpkswus
v24
,
v24
,
v25
;# v24 = 0 4 8 C 1 5 9 D (16-bit)
vsrh
v24
,
v24
,
v19
;# divide v0, v1 by 128
vpkuhus
\
V
,
v24
,
v24
;# \V = scrambled 8-bit result
.endm
.macro
hfilter_8
V
,
increment_counter
lvsl
v17
,
0
,
r3
;# permutate value for alignment
;# input to filter is 9 bytes wide, output is 8 bytes.
lvx
v21
,
0
,
r3
lvx
v22
,
r10
,
r3
.if
\
increment_counter
add
r3
,
r3
,
r4
.endif
vperm
v21
,
v21
,
v22
,
v17
HFilter
\
V
.endm
.macro
load_and_align_8
V
,
increment_counter
lvsl
v17
,
0
,
r3
;# permutate value for alignment
;# input to filter is 21 bytes wide, output is 16 bytes.
;# input will can span three vectors if not aligned correctly.
lvx
v21
,
0
,
r3
lvx
v22
,
r10
,
r3
.if
\
increment_counter
add
r3
,
r3
,
r4
.endif
vperm
\
V
,
v21
,
v22
,
v17
.endm
.macro
write_aligned_8
V
,
increment_counter
stvx
\
V
,
0
,
r7
.if
\
increment_counter
add
r7
,
r7
,
r8
.endif
.endm
.macro
vfilter_16
P0
P1
vmuleub
v22
,
\
P0
,
v20
;# 64 + 4 positive taps
vadduhm
v22
,
v18
,
v22
vmuloub
v23
,
\
P0
,
v20
vadduhm
v23
,
v18
,
v23
vmuleub
v24
,
\
P1
,
v21
vadduhm
v22
,
v22
,
v24
;# Re = evens, saturation unnecessary
vmuloub
v25
,
\
P1
,
v21
vadduhm
v23
,
v23
,
v25
;# Ro = odds
vsrh
v22
,
v22
,
v19
;# divide by 128
vsrh
v23
,
v23
,
v19
;# v16 v17 = evens, odds
vmrghh
\
P0
,
v22
,
v23
;# v18 v19 = 16-bit result in order
vmrglh
v23
,
v22
,
v23
vpkuhus
\
P0
,
\
P0
,
v23
;# P0 = 8-bit result
.endm
.macro
w_8x8
V
,
D
,
R
,
P
stvx
\
V
,
0
,
r1
lwz
\
R
,
0
(
r1
)
stw
\
R
,
0
(
r7
)
lwz
\
R
,
4
(
r1
)
stw
\
R
,
4
(
r7
)
add
\
D
,
\
D
,
\
P
.endm
.align
2
;# r3 unsigned char * src
;# r4 int src_pitch
;# r5 int x_offset
;# r6 int y_offset
;# r7 unsigned char * dst
;# r8 int dst_pitch
bilinear_predict4x4_ppc:
mfspr
r11
,
256
;# get old VRSAVE
oris
r12
,
r11
,
0xf830
ori
r12
,
r12
,
0xfff8
mtspr
256
,
r12
;# set VRSAVE
stwu
r1
,
-
32
(
r1
)
;# create space on the stack
HProlog
second_pass_4x4_pre_copy_b
;# Load up permutation constants
load_c
v10
,
b_0123_b
,
0
,
r9
,
r12
load_c
v11
,
b_4567_b
,
0
,
r9
,
r12
hfilter_8
v0
,
1
hfilter_8
v1
,
1
hfilter_8
v2
,
1
hfilter_8
v3
,
1
;# Finished filtering main horizontal block. If there is no
;# vertical filtering, jump to storing the data. Otherwise
;# load up and filter the additional line that is needed
;# for the vertical filter.
beq
store_out_4x4_b
hfilter_8
v4
,
0
b
second_pass_4x4_b
second_pass_4x4_pre_copy_b:
slwi
r6
,
r6
,
5
;# index into vertical filter array
load_and_align_8
v0
,
1
load_and_align_8
v1
,
1
load_and_align_8
v2
,
1
load_and_align_8
v3
,
1
load_and_align_8
v4
,
1
second_pass_4x4_b:
vspltish
v20
,
8
vspltish
v18
,
3
vslh
v18
,
v20
,
v18
;# 0x0040 0040 0040 0040 0040 0040 0040 0040
load_vfilter
v20
,
v21
vfilter_16
v0
,
v1
vfilter_16
v1
,
v2
vfilter_16
v2
,
v3
vfilter_16
v3
,
v4
store_out_4x4_b:
stvx
v0
,
0
,
r1
lwz
r0
,
0
(
r1
)
stw
r0
,
0
(
r7
)
add
r7
,
r7
,
r8
stvx
v1
,
0
,
r1
lwz
r0
,
0
(
r1
)
stw
r0
,
0
(
r7
)
add
r7
,
r7
,
r8
stvx
v2
,
0
,
r1
lwz
r0
,
0
(
r1
)
stw
r0
,
0
(
r7
)
add
r7
,
r7
,
r8
stvx
v3
,
0
,
r1
lwz
r0
,
0
(
r1
)
stw
r0
,
0
(
r7
)
exit_4x4:
addi
r1
,
r1
,
32
;# recover stack
mtspr
256
,
r11
;# reset old VRSAVE
blr
.align
2
;# r3 unsigned char * src
;# r4 int src_pitch
;# r5 int x_offset
;# r6 int y_offset
;# r7 unsigned char * dst
;# r8 int dst_pitch
bilinear_predict8x4_ppc:
mfspr
r11
,
256
;# get old VRSAVE
oris
r12
,
r11
,
0xf830
ori
r12
,
r12
,
0xfff8
mtspr
256
,
r12
;# set VRSAVE
stwu
r1
,
-
32
(
r1
)
;# create space on the stack
HProlog
second_pass_8x4_pre_copy_b
;# Load up permutation constants
load_c
v10
,
b_0123_b
,
0
,
r9
,
r12
load_c
v11
,
b_4567_b
,
0
,
r9
,
r12
hfilter_8
v0
,
1
hfilter_8
v1
,
1
hfilter_8
v2
,
1
hfilter_8
v3
,
1
;# Finished filtering main horizontal block. If there is no
;# vertical filtering, jump to storing the data. Otherwise
;# load up and filter the additional line that is needed
;# for the vertical filter.
beq
store_out_8x4_b
hfilter_8
v4
,
0
b
second_pass_8x4_b
second_pass_8x4_pre_copy_b:
slwi
r6
,
r6
,
5
;# index into vertical filter array
load_and_align_8
v0
,
1
load_and_align_8
v1
,
1
load_and_align_8
v2
,
1
load_and_align_8
v3
,
1
load_and_align_8
v4
,
1
second_pass_8x4_b:
vspltish
v20
,
8
vspltish
v18
,
3
vslh
v18
,
v20
,
v18
;# 0x0040 0040 0040 0040 0040 0040 0040 0040
load_vfilter
v20
,
v21
vfilter_16
v0
,
v1
vfilter_16
v1
,
v2
vfilter_16
v2
,
v3
vfilter_16
v3
,
v4
store_out_8x4_b:
cmpi
cr0
,
r8
,
8
beq
cr0
,
store_aligned_8x4_b
w_8x8
v0
,
r7
,
r0
,
r8
w_8x8
v1
,
r7
,
r0
,
r8
w_8x8
v2
,
r7
,
r0
,
r8
w_8x8
v3
,
r7
,
r0
,
r8
b
exit_8x4
store_aligned_8x4_b:
load_c
v10
,
b_hilo_b
,
0
,
r9
,
r10
vperm
v0
,
v0
,
v1
,
v10
vperm
v2
,
v2
,
v3
,
v10
stvx
v0
,
0
,
r7
addi
r7
,
r7
,
16
stvx
v2
,
0
,
r7
exit_8x4:
addi
r1
,
r1
,
32
;# recover stack
mtspr
256
,
r11
;# reset old VRSAVE
blr
.align
2
;# r3 unsigned char * src
;# r4 int src_pitch
;# r5 int x_offset
;# r6 int y_offset
;# r7 unsigned char * dst
;# r8 int dst_pitch
bilinear_predict8x8_ppc:
mfspr
r11
,
256
;# get old VRSAVE
oris
r12
,
r11
,
0xfff0
ori
r12
,
r12
,
0xffff
mtspr
256
,
r12
;# set VRSAVE
stwu
r1
,
-
32
(
r1
)
;# create space on the stack
HProlog
second_pass_8x8_pre_copy_b
;# Load up permutation constants
load_c
v10
,
b_0123_b
,
0
,
r9
,
r12
load_c
v11
,
b_4567_b
,
0
,
r9
,
r12
hfilter_8
v0
,
1
hfilter_8
v1
,
1
hfilter_8
v2
,
1
hfilter_8
v3
,
1
hfilter_8
v4
,
1
hfilter_8
v5
,
1
hfilter_8
v6
,
1
hfilter_8
v7
,
1
;# Finished filtering main horizontal block. If there is no
;# vertical filtering, jump to storing the data. Otherwise
;# load up and filter the additional line that is needed
;# for the vertical filter.
beq
store_out_8x8_b
hfilter_8
v8
,
0
b
second_pass_8x8_b
second_pass_8x8_pre_copy_b:
slwi
r6
,
r6
,
5
;# index into vertical filter array
load_and_align_8
v0
,
1
load_and_align_8
v1
,
1
load_and_align_8
v2
,
1
load_and_align_8
v3
,
1
load_and_align_8
v4
,
1
load_and_align_8
v5
,
1
load_and_align_8
v6
,
1
load_and_align_8
v7
,
1
load_and_align_8
v8
,
0
second_pass_8x8_b:
vspltish
v20
,
8
vspltish
v18
,
3
vslh
v18
,
v20
,
v18
;# 0x0040 0040 0040 0040 0040 0040 0040 0040
load_vfilter
v20
,
v21
vfilter_16
v0
,
v1
vfilter_16
v1
,
v2
vfilter_16
v2
,
v3
vfilter_16
v3
,
v4
vfilter_16
v4
,
v5
vfilter_16
v5
,
v6
vfilter_16
v6
,
v7
vfilter_16
v7
,
v8
store_out_8x8_b:
cmpi
cr0
,
r8
,
8
beq
cr0
,
store_aligned_8x8_b
w_8x8
v0
,
r7
,
r0
,
r8
w_8x8
v1
,
r7
,
r0
,
r8
w_8x8
v2
,
r7
,
r0
,
r8
w_8x8
v3
,
r7
,
r0
,
r8
w_8x8
v4
,
r7
,
r0
,
r8
w_8x8
v5
,
r7
,
r0
,
r8
w_8x8
v6
,
r7
,
r0
,
r8
w_8x8
v7
,
r7
,
r0
,
r8
b
exit_8x8
store_aligned_8x8_b:
load_c
v10
,
b_hilo_b
,
0
,
r9
,
r10
vperm
v0
,
v0
,
v1
,
v10
vperm
v2
,
v2
,
v3
,
v10
vperm
v4
,
v4
,
v5
,
v10
vperm
v6
,
v6
,
v7
,
v10
stvx
v0
,
0
,
r7
addi
r7
,
r7
,
16
stvx
v2
,
0
,
r7
addi
r7
,
r7
,
16
stvx
v4
,
0
,
r7
addi
r7
,
r7
,
16
stvx
v6
,
0
,
r7
exit_8x8:
addi
r1
,
r1
,
32
;# recover stack
mtspr
256
,
r11
;# reset old VRSAVE
blr
;# Filters a horizontal line
;# expects:
;# r3 src_ptr
;# r4 pitch
;# r10 16
;# r12 32
;# v17 perm input
;# v18 rounding
;# v19 shift
;# v20 filter taps
;# v21 tmp
;# v22 tmp
;# v23 tmp
;# v24 tmp
;# v25 tmp
;# v26 tmp
;# v27 tmp
;# v28 perm output
;#
.macro
hfilter_16
V
,
increment_counter
lvsl
v17
,
0
,
r3
;# permutate value for alignment
;# input to filter is 21 bytes wide, output is 16 bytes.
;# input can span three vectors if not aligned correctly.
lvx
v21
,
0
,
r3
lvx
v22
,
r10
,
r3