Commit 0bc1ecc2 authored by Jonathan Lennox, committed by Jean-Marc Valin

Create OPUS_FAST_INT64 macro, to abstract conditions where opus_int64 should be used.

This patch adds a macro abstracting the condition under which the silk
math macros use opus_int64-based calculations rather than opus_int32.
No substantive change, but will make it easier to adjust if additional
such platforms are found in the future.
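For context, here is a minimal standalone sketch (not part of the patch) of the two silk_SMULWB variants that this condition selects between. The SMULWB_INT64/SMULWB_INT32 names and the opus_int16/opus_int32/opus_int64 typedefs below are illustrative stand-ins for the real macro and the typedefs provided by opus_types.h. On compilers that use an arithmetic right shift for signed values, both forms compute (a32 * (opus_int16)b32) >> 16 and return the same value, which is why switching only the selection condition is not a substantive change.

#include <stdio.h>
#include <stdint.h>

typedef int16_t opus_int16;
typedef int32_t opus_int32;
typedef int64_t opus_int64;

/* 64-bit form, selected when the native-int64 condition holds. */
#define SMULWB_INT64(a32, b32) \
    (((a32) * (opus_int64)((opus_int16)(b32))) >> 16)

/* 32-bit fallback: split a32 into its high and low 16-bit halves. */
#define SMULWB_INT32(a32, b32) \
    ((((a32) >> 16) * (opus_int32)((opus_int16)(b32))) + \
     ((((a32) & 0x0000FFFF) * (opus_int32)((opus_int16)(b32))) >> 16))

int main(void)
{
    opus_int32 a = -123456789;
    opus_int32 b = 0x12345678;   /* only the low 16 bits (0x5678) are used */
    printf("int64 path: %d\n", (int)SMULWB_INT64(a, b));  /* -41699822 */
    printf("int32 path: %d\n", (int)SMULWB_INT32(a, b));  /* -41699822 */
    return 0;
}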
parent a7c1ebf6
@@ -43,17 +43,20 @@ POSSIBILITY OF SUCH DAMAGE.
#define opus_unlikely(x) (!!(x))
#endif
+/* Set this if opus_int64 is a native type of the CPU. */
+#define OPUS_FAST_INT64 (defined(__x86_64__) || defined(__LP64__) || defined(_WIN64))
/* This is an OPUS_INLINE header file for general platform. */
/* (a32 * (opus_int32)((opus_int16)(b32))) >> 16 output have to be 32bit int */
-#if defined(__x86_64__) || defined(__LP64__) || defined(_WIN64)
+#if OPUS_FAST_INT64
#define silk_SMULWB(a32, b32) (((a32) * (opus_int64)((opus_int16)(b32))) >> 16)
#else
#define silk_SMULWB(a32, b32) ((((a32) >> 16) * (opus_int32)((opus_int16)(b32))) + ((((a32) & 0x0000FFFF) * (opus_int32)((opus_int16)(b32))) >> 16))
#endif
/* a32 + (b32 * (opus_int32)((opus_int16)(c32))) >> 16 output have to be 32bit int */
-#if defined(__x86_64__) || defined(__LP64__) || defined(_WIN64)
+#if OPUS_FAST_INT64
#define silk_SMLAWB(a32, b32, c32) ((a32) + (((b32) * (opus_int64)((opus_int16)(c32))) >> 16))
#else
#define silk_SMLAWB(a32, b32, c32) ((a32) + ((((b32) >> 16) * (opus_int32)((opus_int16)(c32))) + ((((b32) & 0x0000FFFF) * (opus_int32)((opus_int16)(c32))) >> 16)))
@@ -63,7 +66,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define silk_SMULWT(a32, b32) (((a32) >> 16) * ((b32) >> 16) + ((((a32) & 0x0000FFFF) * ((b32) >> 16)) >> 16))
/* a32 + (b32 * (c32 >> 16)) >> 16 */
-#if defined(__x86_64__) || defined(__LP64__) || defined(_WIN64)
+#if OPUS_FAST_INT64
#define silk_SMLAWT(a32, b32, c32) ((a32) + (((b32) * ((opus_int64)(c32) >> 16)) >> 16))
#else
#define silk_SMLAWT(a32, b32, c32) ((a32) + (((b32) >> 16) * ((c32) >> 16)) + ((((b32) & 0x0000FFFF) * ((c32) >> 16)) >> 16))
@@ -85,14 +88,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define silk_SMLAL(a64, b32, c32) (silk_ADD64((a64), ((opus_int64)(b32) * (opus_int64)(c32))))
/* (a32 * b32) >> 16 */
-#if defined(__x86_64__) || defined(__LP64__) || defined(_WIN64)
+#if OPUS_FAST_INT64
#define silk_SMULWW(a32, b32) (((opus_int64)(a32) * (b32)) >> 16)
#else
#define silk_SMULWW(a32, b32) silk_MLA(silk_SMULWB((a32), (b32)), (a32), silk_RSHIFT_ROUND((b32), 16))
#endif
/* a32 + ((b32 * c32) >> 16) */
-#if defined(__x86_64__) || defined(__LP64__) || defined(_WIN64)
+#if OPUS_FAST_INT64
#define silk_SMLAWW(a32, b32, c32) ((a32) + (((opus_int64)(b32) * (c32)) >> 16))
#else
#define silk_SMLAWW(a32, b32, c32) silk_MLA(silk_SMLAWB((a32), (b32), (c32)), (b32), silk_RSHIFT_ROUND((c32), 16))