diff --git a/.gitignore b/.gitignore index b4cec23a7375209ff9aa63e0cffa264ffab0a94f..4358b9c40a5d0f10d12758a4627bf9f6cbb32ac3 100644 --- a/.gitignore +++ b/.gitignore @@ -39,8 +39,8 @@ /examples/vp8cx_set_ref /examples/vp9_lossless_encoder /examples/vp9_spatial_scalable_encoder -/examples/vpx_temporal_scalable_patterns -/examples/vpx_temporal_svc_encoder +/examples/aom_temporal_scalable_patterns +/examples/aom_temporal_svc_encoder /ivfdec /ivfdec.dox /ivfenc @@ -52,14 +52,14 @@ /test_libaom /vp8_api1_migration.dox /vp[89x]_rtcd.h -/vpx.pc -/vpx_config.c -/vpx_config.h -/vpx_dsp_rtcd.h -/vpx_scale_rtcd.h -/vpx_version.h -/vpxdec -/vpxdec.dox -/vpxenc -/vpxenc.dox +/aom.pc +/aom_config.c +/aom_config.h +/aom_dsp_rtcd.h +/aom_scale_rtcd.h +/aom_version.h +/aomdec +/aomdec.dox +/aomenc +/aomenc.dox TAGS diff --git a/CHANGELOG b/CHANGELOG index 7746cc6c4f9c22b0053a79cbb3124da21914f759..4f233d5930321e051d287df59aec99d954722ed9 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -6,7 +6,7 @@ This release is ABI incompatible with 1.4.0. It drops deprecated VP8 controls and adds a variety of VP9 controls for testing. - The vpxenc utility now prefers VP9 by default. + The aomenc utility now prefers VP9 by default. - Enhancements: Faster VP9 encoding and decoding @@ -45,7 +45,7 @@ The VP9 encoder in this release is not feature complete. Users interested in the encoder are advised to use the git master branch and discuss issues on - libvpx mailing lists. + libaom mailing lists. - Upgrading: This release is ABI and API compatible with Duclair (v1.0.0). Users @@ -56,7 +56,7 @@ Get rid of bashisms in the main build scripts Added usage info on command line options Add lossless compression mode - Dll build of libvpx + Dll build of libaom Add additional Mac OS X targets: 10.7, 10.8 and 10.9 (darwin11-13) Add option to disable documentation configure: add --enable-external-build support @@ -64,11 +64,11 @@ configure: support mingw-w64 configure: support hardfloat armv7 CHOSTS configure: add support for android x86 - Add estimated completion time to vpxenc - Don't exit on decode errors in vpxenc - vpxenc: support scaling prior to encoding - vpxdec: support scaling output - vpxenc: improve progress indicators with --skip + Add estimated completion time to aomenc + Don't exit on decode errors in aomenc + aomenc: support scaling prior to encoding + aomdec: support scaling output + aomenc: improve progress indicators with --skip msvs: Don't link to winmm.lib Add a new script for producing vcxproj files Produce Visual Studio 10 and 11 project files @@ -100,7 +100,7 @@ - Enhancements: VP8 optimizations for MIPS dspr2 - vpxenc: add -quiet option + aomenc: add -quiet option - Speed: Encoder and decoder speed is consistent with the Eider release. @@ -138,10 +138,10 @@ is more computationally intensive than the spatial one. This release removes support for a legacy, decode only API that was - supported, but deprecated, at the initial release of libvpx + supported, but deprecated, at the initial release of libaom (v0.9.0). This is not expected to have any impact. If you are impacted, you can apply a reversion to commit 2bf8fb58 locally. - Please update to the latest libvpx API if you are affected. + Please update to the latest libaom API if you are affected. - Enhancements: Adds a motion compensated temporal denoiser to the encoder, which @@ -155,17 +155,17 @@ OS/2 support SunCC support - Changing resolution with vpx_codec_enc_config_set() is now + Changing resolution with aom_codec_enc_config_set() is now supported. 
Previously, reinitializing the codec was required to change the input resolution. - The vpxenc application has initial support for producing multiple + The aomenc application has initial support for producing multiple encodes from the same input in one call. Resizing is not yet supported, but varying other codec parameters is. Use -- to delineate output streams. Options persist from one stream to the next. - Also, the vpxenc application will now use a keyframe interval of + Also, the aomenc application will now use a keyframe interval of 5 seconds by default. Use the --kf-max-dist option to override. - Speed: @@ -202,7 +202,7 @@ enhancement (MFQE) in sections of the frame where there is motion. (#392) - Fixed corruption issues when vpx_codec_enc_config_set() was called + Fixed corruption issues when aom_codec_enc_config_set() was called with spatial resampling enabled. Fixed a decoder error introduced in Duclair where the segmentation @@ -215,9 +215,9 @@ v0.9.7, so all users of that release are encouraged to upgrade. - Upgrading: - This release is ABI incompatible with prior releases of libvpx, so the + This release is ABI incompatible with prior releases of libaom, so the "major" version number has been bumped to 1. You must recompile your - applications against the latest version of the libvpx headers. The + applications against the latest version of the libaom headers. The API remains compatible, and this should not require code changes in most applications. @@ -268,7 +268,7 @@ Cayuga) given a frame with corrupt partition sizes. A bounded out of bounds read was discovered affecting all - versions of libvpx. Given an multipartition input frame that + versions of libaom. Given an multipartition input frame that is truncated between the mode/mv partition and the first residiual paritition (in the block of partition offsets), up to 3 extra bytes could have been read from the source buffer. @@ -296,16 +296,16 @@ notes in this document for that release. - Enhancements: - Stereo 3D format support for vpxenc + Stereo 3D format support for aomenc Runtime detection of available processor cores. Allow specifying --end-usage by enum name - vpxdec: test for frame corruption - vpxenc: add quantizer histogram display - vpxenc: add rate histogram display + aomdec: test for frame corruption + aomenc: add quantizer histogram display + aomenc: add rate histogram display Set VPX_FRAME_IS_DROPPABLE update configure for ios sdk 4.3 Avoid text relocations in ARM vp8 decoder - Generate a vpx.pc file for pkg-config. + Generate a aom.pc file for pkg-config. New ways of passing encoded data between encoder and decoder. - Speed: @@ -366,7 +366,7 @@ Fix semaphore emulation, spin-wait intrinsics on Windows Fix build with xcode4 and simplify GLOBAL. Mark ARM asm objects as allowing a non-executable stack. - Fix vpxenc encoding incorrect webm file header on big endian + Fix aomenc encoding incorrect webm file header on big endian 2011-03-07 v0.9.6 "Bali" @@ -378,7 +378,7 @@ document for that release. - Enhancements: - vpxenc --psnr shows a summary when encode completes + aomenc --psnr shows a summary when encode completes --tune=ssim option to enable activity masking improved postproc visualizations for development updated support for Apple iOS to SDK 4.2 @@ -451,9 +451,9 @@ - Upgrading: This release incorporates backwards-incompatible changes to the - ivfenc and ivfdec tools. These tools are now called vpxenc and vpxdec. + ivfenc and ivfdec tools. These tools are now called aomenc and aomdec. 
- vpxdec + aomdec * the -q (quiet) option has been removed, and replaced with -v (verbose). the output is quiet by default. Use -v to see the version number of the binary. @@ -466,13 +466,13 @@ options must be specified. $ ivfdec -o OUTPUT INPUT - $ vpxdec --i420 -o OUTPUT INPUT + $ aomdec --i420 -o OUTPUT INPUT * If an output file is not specified, the default is to write Y4M to stdout. This makes piping more natural. $ ivfdec -y -o - INPUT | ... - $ vpxdec INPUT | ... + $ aomdec INPUT | ... * The output file has additional flexibility for formatting the filename. It supports escape characters for constructing a @@ -480,33 +480,33 @@ replaces the -p option. To get the equivalent: $ ivfdec -p frame INPUT - $ vpxdec --i420 -o frame-%wx%h-%4.i420 INPUT + $ aomdec --i420 -o frame-%wx%h-%4.i420 INPUT - vpxenc + aomenc * The output file must be specified with -o, rather than as the last argument. $ ivfenc INPUT OUTPUT - $ vpxenc -o OUTPUT INPUT + $ aomenc -o OUTPUT INPUT * The output defaults to webm. To get IVF output, use the --ivf option. $ ivfenc INPUT OUTPUT.ivf - $ vpxenc -o OUTPUT.ivf --ivf INPUT + $ aomenc -o OUTPUT.ivf --ivf INPUT - Enhancements: - ivfenc and ivfdec have been renamed to vpxenc, vpxdec. - vpxdec supports .webm input - vpxdec writes .y4m by default - vpxenc writes .webm output by default - vpxenc --psnr now shows the average/overall PSNR at the end + ivfenc and ivfdec have been renamed to aomenc, aomdec. + aomdec supports .webm input + aomdec writes .y4m by default + aomenc writes .webm output by default + aomenc --psnr now shows the average/overall PSNR at the end ARM platforms now support runtime cpu detection - vpxdec visualizations added for motion vectors, block modes, references - vpxdec now silent by default - vpxdec --progress shows frame-by-frame timing information - vpxenc supports the distinction between --fps and --timebase + aomdec visualizations added for motion vectors, block modes, references + aomdec now silent by default + aomdec --progress shows frame-by-frame timing information + aomenc supports the distinction between --fps and --timebase NASM is now a supported assembler configure: enable PIC for shared libs by default configure: add --enable-small @@ -521,7 +521,7 @@ Build fixes for darwin-icc - Speed: - 20-40% (average 28%) improvement in libvpx decoder speed, + 20-40% (average 28%) improvement in libaom decoder speed, including: Rewrite vp8_short_walsh4x4_sse2() Optimizations on the loopfilters. diff --git a/aom/vpx_codec.h b/aom/aom_codec.h similarity index 78% rename from aom/vpx_codec.h rename to aom/aom_codec.h index 8ae27afb7d9873699031c5183b73383c0683bc37..385c59012532725e82c8741d497582b6805d9963 100644 --- a/aom/vpx_codec.h +++ b/aom/aom_codec.h @@ -23,18 +23,18 @@ * video codec algorithm. * * An application instantiates a specific codec instance by using - * vpx_codec_init() and a pointer to the algorithm's interface structure: + * aom_codec_init() and a pointer to the algorithm's interface structure: *
  *     my_app.c:
- *       extern vpx_codec_iface_t my_codec;
+ *       extern aom_codec_iface_t my_codec;
  *       {
- *           vpx_codec_ctx_t algo;
- *           res = vpx_codec_init(&algo, &my_codec);
+ *           aom_codec_ctx_t algo;
+ *           res = aom_codec_init(&algo, &my_codec);
  *       }
  *     </pre>
* * Once initialized, the instance is manged using other functions from - * the vpx_codec_* family. + * the aom_codec_* family. */ #ifndef VPX_VPX_CODEC_H_ #define VPX_VPX_CODEC_H_ @@ -43,8 +43,8 @@ extern "C" { #endif -#include "./vpx_integer.h" -#include "./vpx_image.h" +#include "./aom_integer.h" +#include "./aom_image.h" /*!\brief Decorator indicating a function is deprecated */ #ifndef DEPRECATED @@ -139,17 +139,17 @@ typedef enum { */ VPX_CODEC_LIST_END -} vpx_codec_err_t; +} aom_codec_err_t; /*! \brief Codec capabilities bitfield * * Each codec advertises the capabilities it supports as part of its - * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces + * ::aom_codec_iface_t interface structure. Capabilities are extra interfaces * or functionality, and are not required to be supported. * * The available flags are specified by VPX_CODEC_CAP_* defines. */ -typedef long vpx_codec_caps_t; +typedef long aom_codec_caps_t; #define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */ #define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */ @@ -160,27 +160,27 @@ typedef long vpx_codec_caps_t; * * The available flags are specified by VPX_CODEC_USE_* defines. */ -typedef long vpx_codec_flags_t; +typedef long aom_codec_flags_t; /*!\brief Codec interface structure. * * Contains function pointers and other data private to the codec * implementation. This structure is opaque to the application. */ -typedef const struct vpx_codec_iface vpx_codec_iface_t; +typedef const struct aom_codec_iface aom_codec_iface_t; /*!\brief Codec private data structure. * * Contains data private to the codec implementation. This structure is opaque * to the application. */ -typedef struct vpx_codec_priv vpx_codec_priv_t; +typedef struct aom_codec_priv aom_codec_priv_t; /*!\brief Iterator * * Opaque storage used for iterating over lists. */ -typedef const void *vpx_codec_iter_t; +typedef const void *aom_codec_iter_t; /*!\brief Codec context structure * @@ -190,39 +190,39 @@ typedef const void *vpx_codec_iter_t; * may reference the 'name' member to get a printable description of the * algorithm. */ -typedef struct vpx_codec_ctx { +typedef struct aom_codec_ctx { const char *name; /**< Printable interface name */ - vpx_codec_iface_t *iface; /**< Interface pointers */ - vpx_codec_err_t err; /**< Last returned error */ + aom_codec_iface_t *iface; /**< Interface pointers */ + aom_codec_err_t err; /**< Last returned error */ const char *err_detail; /**< Detailed info, if available */ - vpx_codec_flags_t init_flags; /**< Flags passed at init time */ + aom_codec_flags_t init_flags; /**< Flags passed at init time */ union { /**< Decoder Configuration Pointer */ - const struct vpx_codec_dec_cfg *dec; + const struct aom_codec_dec_cfg *dec; /**< Encoder Configuration Pointer */ - const struct vpx_codec_enc_cfg *enc; + const struct aom_codec_enc_cfg *enc; const void *raw; } config; /**< Configuration pointer aliasing union */ - vpx_codec_priv_t *priv; /**< Algorithm private storage */ -} vpx_codec_ctx_t; + aom_codec_priv_t *priv; /**< Algorithm private storage */ +} aom_codec_ctx_t; /*!\brief Bit depth for codec * * * This enumeration determines the bit depth of the codec. 
*/ -typedef enum vpx_bit_depth { +typedef enum aom_bit_depth { VPX_BITS_8 = 8, /**< 8 bits */ VPX_BITS_10 = 10, /**< 10 bits */ VPX_BITS_12 = 12, /**< 12 bits */ -} vpx_bit_depth_t; +} aom_bit_depth_t; /* * Library Version Number Interface * * For example, see the following sample return values: - * vpx_codec_version() (1<<16 | 2<<8 | 3) - * vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba" - * vpx_codec_version_extra_str() "rc1-16-gec6a1ba" + * aom_codec_version() (1<<16 | 2<<8 | 3) + * aom_codec_version_str() "v1.2.3-rc1-16-gec6a1ba" + * aom_codec_version_extra_str() "rc1-16-gec6a1ba" */ /*!\brief Return the version information (as an integer) @@ -235,7 +235,7 @@ typedef enum vpx_bit_depth { * in the future. * */ -int vpx_codec_version(void); +int aom_codec_version(void); #define VPX_VERSION_MAJOR(v) \ ((v >> 16) & 0xff) /**< extract major from packed version */ #define VPX_VERSION_MINOR(v) \ @@ -244,13 +244,13 @@ int vpx_codec_version(void); ((v >> 0) & 0xff) /**< extract patch from packed version */ /*!\brief Return the version major number */ -#define vpx_codec_version_major() ((vpx_codec_version() >> 16) & 0xff) +#define aom_codec_version_major() ((aom_codec_version() >> 16) & 0xff) /*!\brief Return the version minor number */ -#define vpx_codec_version_minor() ((vpx_codec_version() >> 8) & 0xff) +#define aom_codec_version_minor() ((aom_codec_version() >> 8) & 0xff) /*!\brief Return the version patch number */ -#define vpx_codec_version_patch() ((vpx_codec_version() >> 0) & 0xff) +#define aom_codec_version_patch() ((aom_codec_version() >> 0) & 0xff) /*!\brief Return the version information (as a string) * @@ -261,24 +261,24 @@ int vpx_codec_version(void); * release candidates, prerelease versions, etc. * */ -const char *vpx_codec_version_str(void); +const char *aom_codec_version_str(void); /*!\brief Return the version information (as a string) * * Returns a printable "extra string". This is the component of the string * returned - * by vpx_codec_version_str() following the three digit version number. + * by aom_codec_version_str() following the three digit version number. * */ -const char *vpx_codec_version_extra_str(void); +const char *aom_codec_version_extra_str(void); /*!\brief Return the build configuration * * Returns a printable string containing an encoded version of the build - * configuration. This may be useful to vpx support. + * configuration. This may be useful to aom support. * */ -const char *vpx_codec_build_config(void); +const char *aom_codec_build_config(void); /*!\brief Return the name for a given interface * @@ -287,7 +287,7 @@ const char *vpx_codec_build_config(void); * \param[in] iface Interface pointer * */ -const char *vpx_codec_iface_name(vpx_codec_iface_t *iface); +const char *aom_codec_iface_name(aom_codec_iface_t *iface); /*!\brief Convert error number to printable string * @@ -299,7 +299,7 @@ const char *vpx_codec_iface_name(vpx_codec_iface_t *iface); * \param[in] err Error number. * */ -const char *vpx_codec_err_to_string(vpx_codec_err_t err); +const char *aom_codec_err_to_string(aom_codec_err_t err); /*!\brief Retrieve error synopsis for codec context * @@ -311,7 +311,7 @@ const char *vpx_codec_err_to_string(vpx_codec_err_t err); * \param[in] ctx Pointer to this instance's context. 
* */ -const char *vpx_codec_error(vpx_codec_ctx_t *ctx); +const char *aom_codec_error(aom_codec_ctx_t *ctx); /*!\brief Retrieve detailed error information for codec context * @@ -323,7 +323,7 @@ const char *vpx_codec_error(vpx_codec_ctx_t *ctx); * \retval NULL * No detailed information is available. */ -const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx); +const char *aom_codec_error_detail(aom_codec_ctx_t *ctx); /* REQUIRED FUNCTIONS * @@ -342,7 +342,7 @@ const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx); * \retval #VPX_CODEC_MEM_ERROR * Memory allocation failed. */ -vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx); +aom_codec_err_t aom_codec_destroy(aom_codec_ctx_t *ctx); /*!\brief Get the capabilities of an algorithm. * @@ -351,7 +351,7 @@ vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx); * \param[in] iface Pointer to the algorithm interface * */ -vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface); +aom_codec_caps_t aom_codec_get_caps(aom_codec_iface_t *iface); /*!\brief Control algorithm * @@ -365,7 +365,7 @@ vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface); * be dispatched. * * Note that this function should not be used directly. Call the - * #vpx_codec_control wrapper macro instead. + * #aom_codec_control wrapper macro instead. * * \param[in] ctx Pointer to this instance's context * \param[in] ctrl_id Algorithm specific control identifier @@ -377,30 +377,30 @@ vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface); * \retval #VPX_CODEC_INVALID_PARAM * The data was not valid. */ -vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...); +aom_codec_err_t aom_codec_control_(aom_codec_ctx_t *ctx, int ctrl_id, ...); #if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS -#define vpx_codec_control(ctx, id, data) vpx_codec_control_(ctx, id, data) +#define aom_codec_control(ctx, id, data) aom_codec_control_(ctx, id, data) #define VPX_CTRL_USE_TYPE(id, typ) #define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) #define VPX_CTRL_VOID(id, typ) #else -/*!\brief vpx_codec_control wrapper macro +/*!\brief aom_codec_control wrapper macro * * This macro allows for type safe conversions across the variadic parameter - * to vpx_codec_control_(). + * to aom_codec_control_(). * * \internal * It works by dispatching the call to the control function through a wrapper * function named with the id parameter. */ -#define vpx_codec_control(ctx, id, data) \ - vpx_codec_control_##id(ctx, id, data) /**<\hideinitializer*/ +#define aom_codec_control(ctx, id, data) \ + aom_codec_control_##id(ctx, id, data) /**<\hideinitializer*/ -/*!\brief vpx_codec_control type definition macro +/*!\brief aom_codec_control type definition macro * * This macro allows for type safe conversions across the variadic parameter - * to vpx_codec_control_(). It defines the type of the argument for a given + * to aom_codec_control_(). It defines the type of the argument for a given * control identifier. * * \internal @@ -409,15 +409,15 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...); * function. 
*/ #define VPX_CTRL_USE_TYPE(id, typ) \ - static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int, typ) \ + static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *, int, typ) \ UNUSED; \ \ - static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx, \ + static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *ctx, \ int ctrl_id, typ data) { \ - return vpx_codec_control_(ctx, ctrl_id, data); \ + return aom_codec_control_(ctx, ctrl_id, data); \ } /**<\hideinitializer*/ -/*!\brief vpx_codec_control deprecated type definition macro +/*!\brief aom_codec_control deprecated type definition macro * * Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is * deprecated and should not be used. Consult the documentation for your @@ -428,18 +428,18 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...); * wrapper to the type-unsafe internal function. */ #define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \ - DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \ - vpx_codec_ctx_t *, int, typ) DEPRECATED UNUSED; \ + DECLSPEC_DEPRECATED static aom_codec_err_t aom_codec_control_##id( \ + aom_codec_ctx_t *, int, typ) DEPRECATED UNUSED; \ \ - DECLSPEC_DEPRECATED static vpx_codec_err_t vpx_codec_control_##id( \ - vpx_codec_ctx_t *ctx, int ctrl_id, typ data) { \ - return vpx_codec_control_(ctx, ctrl_id, data); \ + DECLSPEC_DEPRECATED static aom_codec_err_t aom_codec_control_##id( \ + aom_codec_ctx_t *ctx, int ctrl_id, typ data) { \ + return aom_codec_control_(ctx, ctrl_id, data); \ } /**<\hideinitializer*/ -/*!\brief vpx_codec_control void type definition macro +/*!\brief aom_codec_control void type definition macro * * This macro allows for type safe conversions across the variadic parameter - * to vpx_codec_control_(). It indicates that a given control identifier takes + * to aom_codec_control_(). It indicates that a given control identifier takes * no argument. * * \internal @@ -447,12 +447,12 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...); * type-unsafe internal function. 
*/ #define VPX_CTRL_VOID(id) \ - static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *, int) \ + static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *, int) \ UNUSED; \ \ - static vpx_codec_err_t vpx_codec_control_##id(vpx_codec_ctx_t *ctx, \ + static aom_codec_err_t aom_codec_control_##id(aom_codec_ctx_t *ctx, \ int ctrl_id) { \ - return vpx_codec_control_(ctx, ctrl_id); \ + return aom_codec_control_(ctx, ctrl_id); \ } /**<\hideinitializer*/ #endif diff --git a/aom/vpx_codec.mk b/aom/aom_codec.mk similarity index 55% rename from aom/vpx_codec.mk rename to aom/aom_codec.mk index 9cf4ac4f2d33caab2dfdc18c5438bcc2dec2a977..8ac60334fec2212c6b3882be1fd9ac96f6458cd8 100644 --- a/aom/vpx_codec.mk +++ b/aom/aom_codec.mk @@ -21,23 +21,23 @@ API_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8.h API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h -API_DOC_SRCS-yes += vpx_codec.h -API_DOC_SRCS-yes += vpx_decoder.h -API_DOC_SRCS-yes += vpx_encoder.h -API_DOC_SRCS-yes += vpx_frame_buffer.h -API_DOC_SRCS-yes += vpx_image.h +API_DOC_SRCS-yes += aom_codec.h +API_DOC_SRCS-yes += aom_decoder.h +API_DOC_SRCS-yes += aom_encoder.h +API_DOC_SRCS-yes += aom_frame_buffer.h +API_DOC_SRCS-yes += aom_image.h -API_SRCS-yes += src/vpx_decoder.c -API_SRCS-yes += vpx_decoder.h -API_SRCS-yes += src/vpx_encoder.c -API_SRCS-yes += vpx_encoder.h -API_SRCS-yes += internal/vpx_codec_internal.h -API_SRCS-yes += internal/vpx_psnr.h -API_SRCS-yes += src/vpx_codec.c -API_SRCS-yes += src/vpx_image.c -API_SRCS-yes += src/vpx_psnr.c -API_SRCS-yes += vpx_codec.h -API_SRCS-yes += vpx_codec.mk -API_SRCS-yes += vpx_frame_buffer.h -API_SRCS-yes += vpx_image.h -API_SRCS-yes += vpx_integer.h +API_SRCS-yes += src/aom_decoder.c +API_SRCS-yes += aom_decoder.h +API_SRCS-yes += src/aom_encoder.c +API_SRCS-yes += aom_encoder.h +API_SRCS-yes += internal/aom_codec_internal.h +API_SRCS-yes += internal/aom_psnr.h +API_SRCS-yes += src/aom_codec.c +API_SRCS-yes += src/aom_image.c +API_SRCS-yes += src/aom_psnr.c +API_SRCS-yes += aom_codec.h +API_SRCS-yes += aom_codec.mk +API_SRCS-yes += aom_frame_buffer.h +API_SRCS-yes += aom_image.h +API_SRCS-yes += aom_integer.h diff --git a/aom/vpx_decoder.h b/aom/aom_decoder.h similarity index 85% rename from aom/vpx_decoder.h rename to aom/aom_decoder.h index 972a09d93cf29d3aeaa602883e2f3bd932c5ab1a..d718054734d9485a3c8d85705b1730790f15351d 100644 --- a/aom/vpx_decoder.h +++ b/aom/aom_decoder.h @@ -30,8 +30,8 @@ extern "C" { #endif -#include "./vpx_codec.h" -#include "./vpx_frame_buffer.h" +#include "./aom_codec.h" +#include "./aom_frame_buffer.h" /*!\brief Current ABI version number * @@ -47,7 +47,7 @@ extern "C" { /*! \brief Decoder capabilities bitfield * * Each decoder advertises the capabilities it supports as part of its - * ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces + * ::aom_codec_iface_t interface structure. Capabilities are extra interfaces * or functionality, and are not required to be supported by a decoder. * * The available flags are specified by VPX_CODEC_CAP_* defines. @@ -86,12 +86,12 @@ extern "C" { * stream. Algorithms may extend this structure with data specific * to their bitstream by setting the sz member appropriately. 
*/ -typedef struct vpx_codec_stream_info { +typedef struct aom_codec_stream_info { unsigned int sz; /**< Size of this structure */ unsigned int w; /**< Width (or 0 for unknown/default) */ unsigned int h; /**< Height (or 0 for unknown/default) */ unsigned int is_kf; /**< Current frame is a keyframe */ -} vpx_codec_stream_info_t; +} aom_codec_stream_info_t; /* REQUIRED FUNCTIONS * @@ -104,16 +104,16 @@ typedef struct vpx_codec_stream_info { * This structure is used to pass init time configuration options to the * decoder. */ -typedef struct vpx_codec_dec_cfg { +typedef struct aom_codec_dec_cfg { unsigned int threads; /**< Maximum number of threads to use, default 1 */ unsigned int w; /**< Width */ unsigned int h; /**< Height */ -} vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */ +} aom_codec_dec_cfg_t; /**< alias for struct aom_codec_dec_cfg */ /*!\brief Initialize a decoder instance * * Initializes a decoder context using the given interface. Applications - * should call the vpx_codec_dec_init convenience macro instead of this + * should call the aom_codec_dec_init convenience macro instead of this * function directly, to ensure that the ABI version number parameter * is properly initialized. * @@ -132,17 +132,17 @@ typedef struct vpx_codec_dec_cfg { * \retval #VPX_CODEC_MEM_ERROR * Memory allocation failed. */ -vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - const vpx_codec_dec_cfg_t *cfg, - vpx_codec_flags_t flags, int ver); +aom_codec_err_t aom_codec_dec_init_ver(aom_codec_ctx_t *ctx, + aom_codec_iface_t *iface, + const aom_codec_dec_cfg_t *cfg, + aom_codec_flags_t flags, int ver); -/*!\brief Convenience macro for vpx_codec_dec_init_ver() +/*!\brief Convenience macro for aom_codec_dec_init_ver() * * Ensures the ABI version parameter is properly set. */ -#define vpx_codec_dec_init(ctx, iface, cfg, flags) \ - vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION) +#define aom_codec_dec_init(ctx, iface, cfg, flags) \ + aom_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION) /*!\brief Parse stream info from a buffer * @@ -161,10 +161,10 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, * \retval #VPX_CODEC_OK * Bitstream is parsable and stream information updated */ -vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, +aom_codec_err_t aom_codec_peek_stream_info(aom_codec_iface_t *iface, const uint8_t *data, unsigned int data_sz, - vpx_codec_stream_info_t *si); + aom_codec_stream_info_t *si); /*!\brief Return information about the current stream. * @@ -179,8 +179,8 @@ vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, * \retval #VPX_CODEC_OK * Bitstream is parsable and stream information updated */ -vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, - vpx_codec_stream_info_t *si); +aom_codec_err_t aom_codec_get_stream_info(aom_codec_ctx_t *ctx, + aom_codec_stream_info_t *si); /*!\brief Decode data * @@ -209,10 +209,10 @@ vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, * * \return Returns #VPX_CODEC_OK if the coded data was processed completely * and future pictures can be decoded without error. Otherwise, - * see the descriptions of the other error codes in ::vpx_codec_err_t + * see the descriptions of the other error codes in ::aom_codec_err_t * for recoverability capabilities. 
*/ -vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, +aom_codec_err_t aom_codec_decode(aom_codec_ctx_t *ctx, const uint8_t *data, unsigned int data_sz, void *user_priv, long deadline); @@ -223,8 +223,8 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, * complete when this function returns NULL. * * The list of available frames becomes valid upon completion of the - * vpx_codec_decode call, and remains valid until the next call to - * vpx_codec_decode. + * aom_codec_decode call, and remains valid until the next call to + * aom_codec_decode. * * \param[in] ctx Pointer to this instance's context * \param[in,out] iter Iterator storage, initialized to NULL @@ -232,7 +232,7 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, * \return Returns a pointer to an image, if one is ready for display. Frames * produced will always be in PTS (presentation time stamp) order. */ -vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter); +aom_image_t *aom_codec_get_frame(aom_codec_ctx_t *ctx, aom_codec_iter_t *iter); /*!\defgroup cap_put_frame Frame-Based Decoding Functions * @@ -249,8 +249,8 @@ vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter); * This callback is invoked by the decoder to notify the application of * the availability of decoded image data. */ -typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv, - const vpx_image_t *img); +typedef void (*aom_codec_put_frame_cb_fn_t)(void *user_priv, + const aom_image_t *img); /*!\brief Register for notification of frame completion. * @@ -267,8 +267,8 @@ typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv, * Decoder context not initialized, or algorithm not capable of * posting slice completion. */ -vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_frame_cb_fn_t cb, +aom_codec_err_t aom_codec_register_put_frame_cb(aom_codec_ctx_t *ctx, + aom_codec_put_frame_cb_fn_t cb, void *user_priv); /*!@} - end defgroup cap_put_frame */ @@ -288,10 +288,10 @@ vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, * This callback is invoked by the decoder to notify the application of * the availability of partially decoded image data. The */ -typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv, - const vpx_image_t *img, - const vpx_image_rect_t *valid, - const vpx_image_rect_t *update); +typedef void (*aom_codec_put_slice_cb_fn_t)(void *user_priv, + const aom_image_t *img, + const aom_image_rect_t *valid, + const aom_image_rect_t *update); /*!\brief Register for notification of slice completion. * @@ -308,8 +308,8 @@ typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv, * Decoder context not initialized, or algorithm not capable of * posting slice completion. */ -vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_slice_cb_fn_t cb, +aom_codec_err_t aom_codec_register_put_slice_cb(aom_codec_ctx_t *ctx, + aom_codec_put_slice_cb_fn_t cb, void *user_priv); /*!@} - end defgroup cap_put_slice*/ @@ -352,9 +352,9 @@ vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, * #VPX_MAXIMUM_WORK_BUFFERS external frame * buffers. 
*/ -vpx_codec_err_t vpx_codec_set_frame_buffer_functions( - vpx_codec_ctx_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get, - vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); +aom_codec_err_t aom_codec_set_frame_buffer_functions( + aom_codec_ctx_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get, + aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); /*!@} - end defgroup cap_external_frame_buffer */ diff --git a/aom/vpx_encoder.h b/aom/aom_encoder.h similarity index 89% rename from aom/vpx_encoder.h rename to aom/aom_encoder.h index 079ff29c7a4ce100293a93d0d446a6b28c4a61ac..8b44c4ec4fe86cbe5441c9ff3ec05e1b06ff785c 100644 --- a/aom/vpx_encoder.h +++ b/aom/aom_encoder.h @@ -30,7 +30,7 @@ extern "C" { #endif -#include "./vpx_codec.h" +#include "./aom_codec.h" /*! Temporal Scalability: Maximum length of the sequence defining frame * layer membership @@ -69,7 +69,7 @@ extern "C" { /*! \brief Encoder capabilities bitfield * * Each encoder advertises the capabilities it supports as part of its - * ::vpx_codec_iface_t interface structure. Capabilities are extra + * ::aom_codec_iface_t interface structure. Capabilities are extra * interfaces or functionality, and are not required to be supported * by an encoder. * @@ -104,17 +104,17 @@ extern "C" { * * This structure is able to hold a reference to any fixed size buffer. */ -typedef struct vpx_fixed_buf { +typedef struct aom_fixed_buf { void *buf; /**< Pointer to the data */ size_t sz; /**< Length of the buffer, in chars */ -} vpx_fixed_buf_t; /**< alias for struct vpx_fixed_buf */ +} aom_fixed_buf_t; /**< alias for struct aom_fixed_buf */ /*!\brief Time Stamp Type * * An integer, which when multiplied by the stream's time base, provides * the absolute time of a sample. */ -typedef int64_t vpx_codec_pts_t; +typedef int64_t aom_codec_pts_t; /*!\brief Compressed Frame Flags * @@ -123,7 +123,7 @@ typedef int64_t vpx_codec_pts_t; * can be used by an algorithm to provide additional detail, for example to * support frame types that are codec specific (MPEG-1 D-frames for example) */ -typedef uint32_t vpx_codec_frame_flags_t; +typedef uint32_t aom_codec_frame_flags_t; #define VPX_FRAME_IS_KEY 0x1 /**< frame is the start of a GOP */ #define VPX_FRAME_IS_DROPPABLE 0x2 /**< frame can be dropped without affecting the stream (no future frame depends @@ -137,9 +137,9 @@ typedef uint32_t vpx_codec_frame_flags_t; * * These flags define which error resilient features to enable in the * encoder. The flags are specified through the - * vpx_codec_enc_cfg::g_error_resilient variable. + * aom_codec_enc_cfg::g_error_resilient variable. */ -typedef uint32_t vpx_codec_er_flags_t; +typedef uint32_t aom_codec_er_flags_t; #define VPX_ERROR_RESILIENT_DEFAULT 0x1 /**< Improve resiliency against losses of whole frames */ #define VPX_ERROR_RESILIENT_PARTITIONS 0x2 @@ -151,10 +151,10 @@ typedef uint32_t vpx_codec_er_flags_t; /*!\brief Encoder output packet variants * * This enumeration lists the different kinds of data packets that can be - * returned by calls to vpx_codec_get_cx_data(). Algorithms \ref MAY + * returned by calls to aom_codec_get_cx_data(). Algorithms \ref MAY * extend this list to provide additional functionality. 
*/ -enum vpx_codec_cx_pkt_kind { +enum aom_codec_cx_pkt_kind { VPX_CODEC_CX_FRAME_PKT, /**< Compressed video frame */ VPX_CODEC_STATS_PKT, /**< Two-pass statistics for this frame */ VPX_CODEC_FPMB_STATS_PKT, /**< first pass mb statistics for this frame */ @@ -173,17 +173,17 @@ enum vpx_codec_cx_pkt_kind { * This structure contains the different kinds of output data the encoder * may produce while compressing a frame. */ -typedef struct vpx_codec_cx_pkt { - enum vpx_codec_cx_pkt_kind kind; /**< packet variant */ +typedef struct aom_codec_cx_pkt { + enum aom_codec_cx_pkt_kind kind; /**< packet variant */ union { struct { void *buf; /**< compressed data buffer */ size_t sz; /**< length of compressed data */ - vpx_codec_pts_t pts; /**< time stamp to show frame + aom_codec_pts_t pts; /**< time stamp to show frame (in timebase units) */ unsigned long duration; /**< duration to show frame (in timebase units) */ - vpx_codec_frame_flags_t flags; /**< flags for this frame */ + aom_codec_frame_flags_t flags; /**< flags for this frame */ int partition_id; /**< the partition id defines the decoding order of the partitions. Only @@ -191,19 +191,19 @@ typedef struct vpx_codec_cx_pkt { mode is enabled. First partition has id 0.*/ } frame; /**< data for compressed frame packet */ - vpx_fixed_buf_t twopass_stats; /**< data for two-pass packet */ - vpx_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */ - struct vpx_psnr_pkt { + aom_fixed_buf_t twopass_stats; /**< data for two-pass packet */ + aom_fixed_buf_t firstpass_mb_stats; /**< first pass mb packet */ + struct aom_psnr_pkt { unsigned int samples[4]; /**< Number of samples, total/y/u/v */ uint64_t sse[4]; /**< sum squared error, total/y/u/v */ double psnr[4]; /**< PSNR, total/y/u/v */ } psnr; /**< data for PSNR packet */ - vpx_fixed_buf_t raw; /**< data for arbitrary packets */ + aom_fixed_buf_t raw; /**< data for arbitrary packets */ // Spatial SVC is still experimental and may be removed before the next // ABI bump. #if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION) size_t layer_sizes[VPX_SS_MAX_LAYERS]; - struct vpx_psnr_pkt layer_psnr[VPX_SS_MAX_LAYERS]; + struct aom_psnr_pkt layer_psnr[VPX_SS_MAX_LAYERS]; #endif /* This packet size is fixed to allow codecs to extend this @@ -211,9 +211,9 @@ typedef struct vpx_codec_cx_pkt { * i.e., if it's smaller than 128 bytes, you can store in the * packet list directly. */ - char pad[128 - sizeof(enum vpx_codec_cx_pkt_kind)]; /**< fixed sz */ + char pad[128 - sizeof(enum aom_codec_cx_pkt_kind)]; /**< fixed sz */ } data; /**< packet data */ -} vpx_codec_cx_pkt_t; /**< alias for struct vpx_codec_cx_pkt */ +} aom_codec_cx_pkt_t; /**< alias for struct aom_codec_cx_pkt */ /*!\brief Encoder return output buffer callback * @@ -222,33 +222,33 @@ typedef struct vpx_codec_cx_pkt { */ // putting the definitions here for now. 
(agrange: find if there // is a better place for this) -typedef void (*vpx_codec_enc_output_cx_pkt_cb_fn_t)(vpx_codec_cx_pkt_t *pkt, +typedef void (*aom_codec_enc_output_cx_pkt_cb_fn_t)(aom_codec_cx_pkt_t *pkt, void *user_data); /*!\brief Callback function pointer / user data pair storage */ -typedef struct vpx_codec_enc_output_cx_cb_pair { - vpx_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */ +typedef struct aom_codec_enc_output_cx_cb_pair { + aom_codec_enc_output_cx_pkt_cb_fn_t output_cx_pkt; /**< Callback function */ void *user_priv; /**< Pointer to private data */ -} vpx_codec_priv_output_cx_pkt_cb_pair_t; +} aom_codec_priv_output_cx_pkt_cb_pair_t; /*!\brief Rational Number * * This structure holds a fractional value. */ -typedef struct vpx_rational { +typedef struct aom_rational { int num; /**< fraction numerator */ int den; /**< fraction denominator */ -} vpx_rational_t; /**< alias for struct vpx_rational */ +} aom_rational_t; /**< alias for struct aom_rational */ /*!\brief Multi-pass Encoding Pass */ -enum vpx_enc_pass { +enum aom_enc_pass { VPX_RC_ONE_PASS, /**< Single pass mode */ VPX_RC_FIRST_PASS, /**< First pass of multi-pass mode */ VPX_RC_LAST_PASS /**< Final pass of multi-pass mode */ }; /*!\brief Rate control mode */ -enum vpx_rc_mode { +enum aom_rc_mode { VPX_VBR, /**< Variable Bit Rate (VBR) mode */ VPX_CBR, /**< Constant Bit Rate (CBR) mode */ VPX_CQ, /**< Constrained Quality (CQ) mode */ @@ -263,7 +263,7 @@ enum vpx_rc_mode { * This name is confusing for this behavior, so the new symbols to be used * are VPX_KF_AUTO and VPX_KF_DISABLED. */ -enum vpx_kf_mode { +enum aom_kf_mode { VPX_KF_FIXED, /**< deprecated, implies VPX_KF_DISABLED */ VPX_KF_AUTO, /**< Encoder determines optimal placement automatically */ VPX_KF_DISABLED = 0 /**< Encoder does not place keyframes. */ @@ -271,12 +271,12 @@ enum vpx_kf_mode { /*!\brief Encoded Frame Flags * - * This type indicates a bitfield to be passed to vpx_codec_encode(), defining + * This type indicates a bitfield to be passed to aom_codec_encode(), defining * per-frame boolean values. By convention, bits common to all codecs will be * named VPX_EFLAG_*, and bits specific to an algorithm will be named * /algo/_eflag_*. The lower order 16 bits are reserved for common use. */ -typedef long vpx_enc_frame_flags_t; +typedef long aom_enc_frame_flags_t; #define VPX_EFLAG_FORCE_KF (1 << 0) /**< Force this frame to be a keyframe */ /*!\brief Encoder configuration structure @@ -285,7 +285,7 @@ typedef long vpx_enc_frame_flags_t; * across all codecs. This doesn't imply that all codecs support all features, * however. */ -typedef struct vpx_codec_enc_cfg { +typedef struct aom_codec_enc_cfg { /* * generic settings (g) */ @@ -339,9 +339,9 @@ typedef struct vpx_codec_enc_cfg { * * This value identifies the bit_depth of the codec, * Only certain bit-depths are supported as identified in the - * vpx_bit_depth_t enum. + * aom_bit_depth_t enum. */ - vpx_bit_depth_t g_bit_depth; + aom_bit_depth_t g_bit_depth; /*!\brief Bit-depth of the input frames * @@ -363,7 +363,7 @@ typedef struct vpx_codec_enc_cfg { * \ref RECOMMENDED method is to set the timebase to that of the parent * container or multimedia framework (ex: 1/1000 for ms, as in FLV). */ - struct vpx_rational g_timebase; + struct aom_rational g_timebase; /*!\brief Enable error resilient modes. * @@ -371,14 +371,14 @@ typedef struct vpx_codec_enc_cfg { * it should enable to take measures for streaming over lossy or noisy * links. 
*/ - vpx_codec_er_flags_t g_error_resilient; + aom_codec_er_flags_t g_error_resilient; /*!\brief Multi-pass Encoding Mode * * This value should be set to the current phase for multi-pass encoding. * For single pass, set to #VPX_RC_ONE_PASS. */ - enum vpx_enc_pass g_pass; + enum aom_enc_pass g_pass; /*!\brief Allow lagged encoding * @@ -406,7 +406,7 @@ typedef struct vpx_codec_enc_cfg { * trade-off is often acceptable, but for many applications is not. It can * be disabled in these cases. * - * Note that not all codecs support this feature. All vpx VPx codecs do. + * Note that not all codecs support this feature. All aom VPx codecs do. * For other codecs, consult the documentation for that algorithm. * * This threshold is described as a percentage of the target data buffer. @@ -463,21 +463,21 @@ typedef struct vpx_codec_enc_cfg { * bandwidth link, as from a local disk, where higher variations in * bitrate are acceptable. */ - enum vpx_rc_mode rc_end_usage; + enum aom_rc_mode rc_end_usage; /*!\brief Two-pass stats buffer. * * A buffer containing all of the stats packets produced in the first * pass, concatenated. */ - vpx_fixed_buf_t rc_twopass_stats_in; + aom_fixed_buf_t rc_twopass_stats_in; /*!\brief first pass mb stats buffer. * * A buffer containing all of the first pass mb stats packets produced * in the first pass, concatenated. */ - vpx_fixed_buf_t rc_firstpass_mb_stats_in; + aom_fixed_buf_t rc_firstpass_mb_stats_in; /*!\brief Target data rate * @@ -495,7 +495,7 @@ typedef struct vpx_codec_enc_cfg { * encoded image. The range of valid values for the quantizer is codec * specific. Consult the documentation for the codec to determine the * values to use. To determine the range programmatically, call - * vpx_codec_enc_config_default() with a usage value of 0. + * aom_codec_enc_config_default() with a usage value of 0. */ unsigned int rc_min_quantizer; @@ -505,7 +505,7 @@ typedef struct vpx_codec_enc_cfg { * encoded image. The range of valid values for the quantizer is codec * specific. Consult the documentation for the codec to determine the * values to use. To determine the range programmatically, call - * vpx_codec_enc_config_default() with a usage value of 0. + * aom_codec_enc_config_default() with a usage value of 0. */ unsigned int rc_max_quantizer; @@ -609,7 +609,7 @@ typedef struct vpx_codec_enc_cfg { * fixed interval, or determine the optimal placement automatically * (as governed by the #kf_min_dist and #kf_max_dist parameters) */ - enum vpx_kf_mode kf_mode; + enum aom_kf_mode kf_mode; /*!\brief Keyframe minimum interval * @@ -707,25 +707,25 @@ typedef struct vpx_codec_enc_cfg { * */ int temporal_layering_mode; -} vpx_codec_enc_cfg_t; /**< alias for struct vpx_codec_enc_cfg */ +} aom_codec_enc_cfg_t; /**< alias for struct aom_codec_enc_cfg */ /*!\brief vp9 svc extra configure parameters * * This defines max/min quantizers and scale factors for each layer * */ -typedef struct vpx_svc_parameters { +typedef struct aom_svc_parameters { int max_quantizers[VPX_MAX_LAYERS]; /**< Max Q for each layer */ int min_quantizers[VPX_MAX_LAYERS]; /**< Min Q for each layer */ int scaling_factor_num[VPX_MAX_LAYERS]; /**< Scaling factor-numerator */ int scaling_factor_den[VPX_MAX_LAYERS]; /**< Scaling factor-denominator */ int temporal_layering_mode; /**< Temporal layering mode */ -} vpx_svc_extra_cfg_t; +} aom_svc_extra_cfg_t; /*!\brief Initialize an encoder instance * * Initializes a encoder context using the given interface. 
Applications - * should call the vpx_codec_enc_init convenience macro instead of this + * should call the aom_codec_enc_init convenience macro instead of this * function directly, to ensure that the ABI version number parameter * is properly initialized. * @@ -744,22 +744,22 @@ typedef struct vpx_svc_parameters { * \retval #VPX_CODEC_MEM_ERROR * Memory allocation failed. */ -vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - const vpx_codec_enc_cfg_t *cfg, - vpx_codec_flags_t flags, int ver); +aom_codec_err_t aom_codec_enc_init_ver(aom_codec_ctx_t *ctx, + aom_codec_iface_t *iface, + const aom_codec_enc_cfg_t *cfg, + aom_codec_flags_t flags, int ver); -/*!\brief Convenience macro for vpx_codec_enc_init_ver() +/*!\brief Convenience macro for aom_codec_enc_init_ver() * * Ensures the ABI version parameter is properly set. */ -#define vpx_codec_enc_init(ctx, iface, cfg, flags) \ - vpx_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION) +#define aom_codec_enc_init(ctx, iface, cfg, flags) \ + aom_codec_enc_init_ver(ctx, iface, cfg, flags, VPX_ENCODER_ABI_VERSION) /*!\brief Initialize multi-encoder instance * * Initializes multi-encoder context using the given interface. - * Applications should call the vpx_codec_enc_init_multi convenience macro + * Applications should call the aom_codec_enc_init_multi convenience macro * instead of this function directly, to ensure that the ABI version number * parameter is properly initialized. * @@ -776,16 +776,16 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, * \retval #VPX_CODEC_MEM_ERROR * Memory allocation failed. */ -vpx_codec_err_t vpx_codec_enc_init_multi_ver( - vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg, - int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver); +aom_codec_err_t aom_codec_enc_init_multi_ver( + aom_codec_ctx_t *ctx, aom_codec_iface_t *iface, aom_codec_enc_cfg_t *cfg, + int num_enc, aom_codec_flags_t flags, aom_rational_t *dsf, int ver); -/*!\brief Convenience macro for vpx_codec_enc_init_multi_ver() +/*!\brief Convenience macro for aom_codec_enc_init_multi_ver() * * Ensures the ABI version parameter is properly set. */ -#define vpx_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \ - vpx_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \ +#define aom_codec_enc_init_multi(ctx, iface, cfg, num_enc, flags, dsf) \ + aom_codec_enc_init_multi_ver(ctx, iface, cfg, num_enc, flags, dsf, \ VPX_ENCODER_ABI_VERSION) /*!\brief Get a default configuration @@ -807,8 +807,8 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver( * \retval #VPX_CODEC_INVALID_PARAM * A parameter was NULL, or the usage value was not recognized. */ -vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg, +aom_codec_err_t aom_codec_enc_config_default(aom_codec_iface_t *iface, + aom_codec_enc_cfg_t *cfg, unsigned int reserved); /*!\brief Set or change configuration @@ -825,8 +825,8 @@ vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, * \retval #VPX_CODEC_INVALID_PARAM * A parameter was NULL, or the usage value was not recognized. 
*/ -vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, - const vpx_codec_enc_cfg_t *cfg); +aom_codec_err_t aom_codec_enc_config_set(aom_codec_ctx_t *ctx, + const aom_codec_enc_cfg_t *cfg); /*!\brief Get global stream headers * @@ -839,7 +839,7 @@ vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, * \retval Non-NULL * Pointer to buffer containing global header packet */ -vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx); +aom_fixed_buf_t *aom_codec_get_global_headers(aom_codec_ctx_t *ctx); #define VPX_DL_REALTIME (1) /**< deadline parameter analogous to VPx REALTIME mode. */ @@ -866,8 +866,8 @@ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx); * When the last frame has been passed to the encoder, this function should * continue to be called, with the img parameter set to NULL. This will * signal the end-of-stream condition to the encoder and allow it to encode - * any held buffers. Encoding is complete when vpx_codec_encode() is called - * and vpx_codec_get_cx_data() returns no data. + * any held buffers. Encoding is complete when aom_codec_encode() is called + * and aom_codec_get_cx_data() returns no data. * * \param[in] ctx Pointer to this instance's context * \param[in] img Image data to encode, NULL to flush. @@ -883,9 +883,9 @@ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx); * \retval #VPX_CODEC_INVALID_PARAM * A parameter was NULL, the image format is unsupported, etc. */ -vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, - vpx_codec_pts_t pts, unsigned long duration, - vpx_enc_frame_flags_t flags, +aom_codec_err_t aom_codec_encode(aom_codec_ctx_t *ctx, const aom_image_t *img, + aom_codec_pts_t pts, unsigned long duration, + aom_enc_frame_flags_t flags, unsigned long deadline); /*!\brief Set compressed data output buffer @@ -919,7 +919,7 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, * buffer. * * Applications \ref MUSTNOT call this function during iteration of - * vpx_codec_get_cx_data(). + * aom_codec_get_cx_data(). * * \param[in] ctx Pointer to this instance's context * \param[in] buf Buffer to store compressed data into @@ -931,8 +931,8 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, * \retval #VPX_CODEC_INVALID_PARAM * A parameter was NULL, the image format is unsupported, etc. */ -vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, - const vpx_fixed_buf_t *buf, +aom_codec_err_t aom_codec_set_cx_data_buf(aom_codec_ctx_t *ctx, + const aom_fixed_buf_t *buf, unsigned int pad_before, unsigned int pad_after); @@ -940,7 +940,7 @@ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, * * Iterates over a list of data packets to be passed from the encoder to the * application. The different kinds of packets available are enumerated in - * #vpx_codec_cx_pkt_kind. + * #aom_codec_cx_pkt_kind. * * #VPX_CODEC_CX_FRAME_PKT packets should be passed to the application's * muxer. Multiple compressed frames may be in the list. @@ -950,7 +950,7 @@ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, * not recognize or support. * * The data buffers returned from this function are only guaranteed to be - * valid until the application makes another call to any vpx_codec_* function. + * valid until the application makes another call to any aom_codec_* function. 
* * \param[in] ctx Pointer to this instance's context * \param[in,out] iter Iterator storage, initialized to NULL @@ -959,8 +959,8 @@ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, * two-pass statistics, etc.) or NULL to signal end-of-list. * */ -const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, - vpx_codec_iter_t *iter); +const aom_codec_cx_pkt_t *aom_codec_get_cx_data(aom_codec_ctx_t *ctx, + aom_codec_iter_t *iter); /*!\brief Get Preview Frame * @@ -974,7 +974,7 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, * available. * */ -const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx); +const aom_image_t *aom_codec_get_preview_frame(aom_codec_ctx_t *ctx); /*!@} - end defgroup encoder*/ #ifdef __cplusplus diff --git a/aom/vpx_frame_buffer.h b/aom/aom_frame_buffer.h similarity index 83% rename from aom/vpx_frame_buffer.h rename to aom/aom_frame_buffer.h index 247768d5ad0cf13c29593397732d4a6b8ff9a566..ce4d64a9ee5635467947136d00f47fa8291c62d0 100644 --- a/aom/vpx_frame_buffer.h +++ b/aom/aom_frame_buffer.h @@ -20,7 +20,7 @@ extern "C" { #endif -#include "./vpx_integer.h" +#include "./aom_integer.h" /*!\brief The maximum number of work buffers used by libaom. * Support maximum 4 threads to decode video in parallel. @@ -37,11 +37,11 @@ extern "C" { * * This structure holds allocated frame buffers used by the decoder. */ -typedef struct vpx_codec_frame_buffer { +typedef struct aom_codec_frame_buffer { uint8_t *data; /**< Pointer to the data buffer */ size_t size; /**< Size of data in bytes */ void *priv; /**< Frame's private data */ -} vpx_codec_frame_buffer_t; +} aom_codec_frame_buffer_t; /*!\brief get frame buffer callback prototype * @@ -52,17 +52,17 @@ typedef struct vpx_codec_frame_buffer { * to the allocated size. The application does not need to align the allocated * data. The callback is triggered when the decoder needs a frame buffer to * decode a compressed image into. This function may be called more than once - * for every call to vpx_codec_decode. The application may set fb->priv to + * for every call to aom_codec_decode. The application may set fb->priv to * some data which will be passed back in the ximage and the release function * call. |fb| is guaranteed to not be NULL. On success the callback must * return 0. Any failure the callback must return a value less than 0. * * \param[in] priv Callback's private data * \param[in] new_size Size in bytes needed by the buffer - * \param[in,out] fb Pointer to vpx_codec_frame_buffer_t + * \param[in,out] fb Pointer to aom_codec_frame_buffer_t */ -typedef int (*vpx_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size, - vpx_codec_frame_buffer_t *fb); +typedef int (*aom_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size, + aom_codec_frame_buffer_t *fb); /*!\brief release frame buffer callback prototype * @@ -72,10 +72,10 @@ typedef int (*vpx_get_frame_buffer_cb_fn_t)(void *priv, size_t min_size, * a value less than 0. 
* * \param[in] priv Callback's private data - * \param[in] fb Pointer to vpx_codec_frame_buffer_t + * \param[in] fb Pointer to aom_codec_frame_buffer_t */ -typedef int (*vpx_release_frame_buffer_cb_fn_t)(void *priv, - vpx_codec_frame_buffer_t *fb); +typedef int (*aom_release_frame_buffer_cb_fn_t)(void *priv, + aom_codec_frame_buffer_t *fb); #ifdef __cplusplus } // extern "C" diff --git a/aom/vpx_image.h b/aom/aom_image.h similarity index 89% rename from aom/vpx_image.h rename to aom/aom_image.h index 86d31607cddb3081332485182bccfbc9d9a65833..16c1c30714e106727ed7e41ba78db8d62e233034 100644 --- a/aom/vpx_image.h +++ b/aom/aom_image.h @@ -10,7 +10,7 @@ */ /*!\file - * \brief Describes the vpx image descriptor and associated operations + * \brief Describes the aom image descriptor and associated operations * */ #ifndef VPX_VPX_IMAGE_H_ @@ -36,7 +36,7 @@ extern "C" { #define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */ /*!\brief List of supported image formats */ -typedef enum vpx_img_fmt { +typedef enum aom_img_fmt { VPX_IMG_FMT_NONE, VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */ VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */ @@ -55,7 +55,7 @@ typedef enum vpx_img_fmt { VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */ VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2, VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | - 3, /** < planar 4:2:0 format with vpx color space */ + 3, /** < planar 4:2:0 format with aom color space */ VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4, VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5, VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6, @@ -65,10 +65,10 @@ typedef enum vpx_img_fmt { VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH, VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH, VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH -} vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */ +} aom_img_fmt_t; /**< alias for enum aom_img_fmt */ /*!\brief List of supported color spaces */ -typedef enum vpx_color_space { +typedef enum aom_color_space { VPX_CS_UNKNOWN = 0, /**< Unknown */ VPX_CS_BT_601 = 1, /**< BT.601 */ VPX_CS_BT_709 = 2, /**< BT.709 */ @@ -77,19 +77,19 @@ typedef enum vpx_color_space { VPX_CS_BT_2020 = 5, /**< BT.2020 */ VPX_CS_RESERVED = 6, /**< Reserved */ VPX_CS_SRGB = 7 /**< sRGB */ -} vpx_color_space_t; /**< alias for enum vpx_color_space */ +} aom_color_space_t; /**< alias for enum aom_color_space */ /*!\brief List of supported color range */ -typedef enum vpx_color_range { +typedef enum aom_color_range { VPX_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */ VPX_CR_FULL_RANGE = 1 /**< YUV/RGB [0..255] */ -} vpx_color_range_t; /**< alias for enum vpx_color_range */ +} aom_color_range_t; /**< alias for enum aom_color_range */ /**\brief Image Descriptor */ -typedef struct vpx_image { - vpx_img_fmt_t fmt; /**< Image Format */ - vpx_color_space_t cs; /**< Color Space */ - vpx_color_range_t range; /**< Color Range */ +typedef struct aom_image { + aom_img_fmt_t fmt; /**< Image Format */ + aom_color_space_t cs; /**< Color Space */ + aom_color_range_t range; /**< Color Range */ /* Image storage dimensions */ unsigned int w; /**< Stored image width */ @@ -131,15 +131,15 @@ typedef struct vpx_image { int self_allocd; /**< private */ void *fb_priv; /**< Frame buffer data associated with the image. 
*/ -} vpx_image_t; /**< alias for struct vpx_image */ +} aom_image_t; /**< alias for struct aom_image */ /**\brief Representation of a rectangle on a surface */ -typedef struct vpx_image_rect { +typedef struct aom_image_rect { unsigned int x; /**< leftmost column */ unsigned int y; /**< topmost row */ unsigned int w; /**< width */ unsigned int h; /**< height */ -} vpx_image_rect_t; /**< alias for struct vpx_image_rect */ +} aom_image_rect_t; /**< alias for struct aom_image_rect */ /*!\brief Open a descriptor, allocating storage for the underlying image * @@ -159,7 +159,7 @@ typedef struct vpx_image_rect { * parameter is non-null, the value of the img parameter will be * returned. */ -vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt, +aom_image_t *aom_img_alloc(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w, unsigned int d_h, unsigned int align); @@ -182,7 +182,7 @@ vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt, * parameter is non-null, the value of the img parameter will be * returned. */ -vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w, +aom_image_t *aom_img_wrap(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w, unsigned int d_h, unsigned int align, unsigned char *img_data); @@ -199,7 +199,7 @@ vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w, * * \return 0 if the requested rectangle is valid, nonzero otherwise. */ -int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y, +int aom_img_set_rect(aom_image_t *img, unsigned int x, unsigned int y, unsigned int w, unsigned int h); /*!\brief Flip the image vertically (top for bottom) @@ -209,7 +209,7 @@ int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y, * * \param[in] img Image descriptor */ -void vpx_img_flip(vpx_image_t *img); +void aom_img_flip(aom_image_t *img); /*!\brief Close an image descriptor * @@ -217,7 +217,7 @@ void vpx_img_flip(vpx_image_t *img); * * \param[in] img Image descriptor */ -void vpx_img_free(vpx_image_t *img); +void aom_img_free(aom_image_t *img); #ifdef __cplusplus } // extern "C" diff --git a/aom/vpx_integer.h b/aom/aom_integer.h similarity index 100% rename from aom/vpx_integer.h rename to aom/aom_integer.h diff --git a/aom/exports_com b/aom/exports_com index 2ab05099f81acc795c0978c2728c8fbb2b4485c8..0c79fa124b1c18bff6bda80cf17d389d368952df 100644 --- a/aom/exports_com +++ b/aom/exports_com @@ -1,16 +1,16 @@ -text vpx_codec_build_config -text vpx_codec_control_ -text vpx_codec_destroy -text vpx_codec_err_to_string -text vpx_codec_error -text vpx_codec_error_detail -text vpx_codec_get_caps -text vpx_codec_iface_name -text vpx_codec_version -text vpx_codec_version_extra_str -text vpx_codec_version_str -text vpx_img_alloc -text vpx_img_flip -text vpx_img_free -text vpx_img_set_rect -text vpx_img_wrap +text aom_codec_build_config +text aom_codec_control_ +text aom_codec_destroy +text aom_codec_err_to_string +text aom_codec_error +text aom_codec_error_detail +text aom_codec_get_caps +text aom_codec_iface_name +text aom_codec_version +text aom_codec_version_extra_str +text aom_codec_version_str +text aom_img_alloc +text aom_img_flip +text aom_img_free +text aom_img_set_rect +text aom_img_wrap diff --git a/aom/exports_dec b/aom/exports_dec index c694ebae1284bc77a27938914c11bd67d9e7bf83..de8fe449a953b2bc8895015b2fdc5746c84c8d23 100644 --- a/aom/exports_dec +++ b/aom/exports_dec @@ -1,8 +1,8 @@ -text vpx_codec_dec_init_ver -text vpx_codec_decode -text vpx_codec_get_frame -text 
vpx_codec_get_stream_info -text vpx_codec_peek_stream_info -text vpx_codec_register_put_frame_cb -text vpx_codec_register_put_slice_cb -text vpx_codec_set_frame_buffer_functions +text aom_codec_dec_init_ver +text aom_codec_decode +text aom_codec_get_frame +text aom_codec_get_stream_info +text aom_codec_peek_stream_info +text aom_codec_register_put_frame_cb +text aom_codec_register_put_slice_cb +text aom_codec_set_frame_buffer_functions diff --git a/aom/exports_enc b/aom/exports_enc index e4707ba10822a16c97373242e3da774b1a022f0a..4932c26f2c779ce716019fb1f0dcee0ffd853fd9 100644 --- a/aom/exports_enc +++ b/aom/exports_enc @@ -1,15 +1,15 @@ -text vpx_codec_enc_config_default -text vpx_codec_enc_config_set -text vpx_codec_enc_init_multi_ver -text vpx_codec_enc_init_ver -text vpx_codec_encode -text vpx_codec_get_cx_data -text vpx_codec_get_global_headers -text vpx_codec_get_preview_frame -text vpx_codec_set_cx_data_buf -text vpx_svc_dump_statistics -text vpx_svc_encode -text vpx_svc_get_message -text vpx_svc_init -text vpx_svc_release -text vpx_svc_set_options +text aom_codec_enc_config_default +text aom_codec_enc_config_set +text aom_codec_enc_init_multi_ver +text aom_codec_enc_init_ver +text aom_codec_encode +text aom_codec_get_cx_data +text aom_codec_get_global_headers +text aom_codec_get_preview_frame +text aom_codec_set_cx_data_buf +text aom_svc_dump_statistics +text aom_svc_encode +text aom_svc_get_message +text aom_svc_init +text aom_svc_release +text aom_svc_set_options diff --git a/aom/internal/vpx_codec_internal.h b/aom/internal/aom_codec_internal.h similarity index 68% rename from aom/internal/vpx_codec_internal.h rename to aom/internal/aom_codec_internal.h index be86e802623213680a4ebd270f7090586bf7bf26..5744088462313a9363f7212d14c39d5ac088807e 100644 --- a/aom/internal/vpx_codec_internal.h +++ b/aom/internal/aom_codec_internal.h @@ -20,7 +20,7 @@ * into the global namespace: *
  *     my_codec.c:
- *       vpx_codec_iface_t my_codec = {
+ *       aom_codec_iface_t my_codec = {
  *           "My Codec v1.0",
  *           VPX_CODEC_ALG_ABI_VERSION,
  *           ...
@@ -28,23 +28,23 @@
  *     
  *
  * An application instantiates a specific decoder instance by using
- * vpx_codec_init() and a pointer to the algorithm's interface structure:
+ * aom_codec_init() and a pointer to the algorithm's interface structure:
  *
  *     my_app.c:
- *       extern vpx_codec_iface_t my_codec;
+ *       extern aom_codec_iface_t my_codec;
  *       {
- *           vpx_codec_ctx_t algo;
- *           res = vpx_codec_init(&algo, &my_codec);
+ *           aom_codec_ctx_t algo;
+ *           res = aom_codec_init(&algo, &my_codec);
  *       }
  *     
* * Once initialized, the instance is manged using other functions from - * the vpx_codec_* family. + * the aom_codec_* family. */ #ifndef VPX_INTERNAL_VPX_CODEC_INTERNAL_H_ #define VPX_INTERNAL_VPX_CODEC_INTERNAL_H_ -#include "../vpx_decoder.h" -#include "../vpx_encoder.h" +#include "../aom_decoder.h" +#include "../aom_encoder.h" #include #ifdef __cplusplus @@ -61,13 +61,13 @@ extern "C" { */ #define VPX_CODEC_INTERNAL_ABI_VERSION (5) /**<\hideinitializer*/ -typedef struct vpx_codec_alg_priv vpx_codec_alg_priv_t; -typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t; +typedef struct aom_codec_alg_priv aom_codec_alg_priv_t; +typedef struct aom_codec_priv_enc_mr_cfg aom_codec_priv_enc_mr_cfg_t; /*!\brief init function pointer prototype * * Performs algorithm-specific initialization of the decoder context. This - * function is called by the generic vpx_codec_init() wrapper function, so + * function is called by the generic aom_codec_init() wrapper function, so * plugins implementing this interface may trust the input parameters to be * properly initialized. * @@ -77,13 +77,13 @@ typedef struct vpx_codec_priv_enc_mr_cfg vpx_codec_priv_enc_mr_cfg_t; * \retval #VPX_CODEC_MEM_ERROR * Memory operation failed. */ -typedef vpx_codec_err_t (*vpx_codec_init_fn_t)( - vpx_codec_ctx_t *ctx, vpx_codec_priv_enc_mr_cfg_t *data); +typedef aom_codec_err_t (*aom_codec_init_fn_t)( + aom_codec_ctx_t *ctx, aom_codec_priv_enc_mr_cfg_t *data); /*!\brief destroy function pointer prototype * * Performs algorithm-specific destruction of the decoder context. This - * function is called by the generic vpx_codec_destroy() wrapper function, + * function is called by the generic aom_codec_destroy() wrapper function, * so plugins implementing this interface may trust the input parameters * to be properly initialized. * @@ -93,12 +93,12 @@ typedef vpx_codec_err_t (*vpx_codec_init_fn_t)( * \retval #VPX_CODEC_MEM_ERROR * Memory operation failed. */ -typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx); +typedef aom_codec_err_t (*aom_codec_destroy_fn_t)(aom_codec_alg_priv_t *ctx); /*!\brief parse stream info function pointer prototype * * Performs high level parsing of the bitstream. This function is called by the - * generic vpx_codec_peek_stream_info() wrapper function, so plugins + * generic aom_codec_peek_stream_info() wrapper function, so plugins * implementing this interface may trust the input parameters to be properly * initialized. * @@ -112,9 +112,9 @@ typedef vpx_codec_err_t (*vpx_codec_destroy_fn_t)(vpx_codec_alg_priv_t *ctx); * \retval #VPX_CODEC_OK * Bitstream is parsable and stream information updated */ -typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data, +typedef aom_codec_err_t (*aom_codec_peek_si_fn_t)(const uint8_t *data, unsigned int data_sz, - vpx_codec_stream_info_t *si); + aom_codec_stream_info_t *si); /*!\brief Return information about the current stream. * @@ -129,8 +129,8 @@ typedef vpx_codec_err_t (*vpx_codec_peek_si_fn_t)(const uint8_t *data, * \retval #VPX_CODEC_OK * Bitstream is parsable and stream information updated */ -typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx, - vpx_codec_stream_info_t *si); +typedef aom_codec_err_t (*aom_codec_get_si_fn_t)(aom_codec_alg_priv_t *ctx, + aom_codec_stream_info_t *si); /*!\brief control function pointer prototype * @@ -138,7 +138,7 @@ typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx, * instance. 
This can be used to implement features specific to a particular * algorithm. * - * This function is called by the generic vpx_codec_control() wrapper + * This function is called by the generic aom_codec_control() wrapper * function, so plugins implementing this interface may trust the input * parameters to be properly initialized. However, this interface does not * provide type safety for the exchanged data or assign meanings to the @@ -154,31 +154,31 @@ typedef vpx_codec_err_t (*vpx_codec_get_si_fn_t)(vpx_codec_alg_priv_t *ctx, * \retval #VPX_CODEC_OK * The internal state data was deserialized. */ -typedef vpx_codec_err_t (*vpx_codec_control_fn_t)(vpx_codec_alg_priv_t *ctx, +typedef aom_codec_err_t (*aom_codec_control_fn_t)(aom_codec_alg_priv_t *ctx, va_list ap); /*!\brief control function pointer mapping * * This structure stores the mapping between control identifiers and * implementing functions. Each algorithm provides a list of these - * mappings. This list is searched by the vpx_codec_control() wrapper + * mappings. This list is searched by the aom_codec_control() wrapper * function to determine which function to invoke. The special * value {0, NULL} is used to indicate end-of-list, and must be * present. The special value {0, } can be used as a catch-all * mapping. This implies that ctrl_id values chosen by the algorithm * \ref MUST be non-zero. */ -typedef const struct vpx_codec_ctrl_fn_map { +typedef const struct aom_codec_ctrl_fn_map { int ctrl_id; - vpx_codec_control_fn_t fn; -} vpx_codec_ctrl_fn_map_t; + aom_codec_control_fn_t fn; +} aom_codec_ctrl_fn_map_t; /*!\brief decode data function pointer prototype * * Processes a buffer of coded data. If the processing results in a new * decoded frame becoming available, #VPX_CODEC_CB_PUT_SLICE and * #VPX_CODEC_CB_PUT_FRAME events are generated as appropriate. This - * function is called by the generic vpx_codec_decode() wrapper function, + * function is called by the generic aom_codec_decode() wrapper function, * so plugins implementing this interface may trust the input parameters * to be properly initialized. * @@ -190,10 +190,10 @@ typedef const struct vpx_codec_ctrl_fn_map { * * \return Returns #VPX_CODEC_OK if the coded data was processed completely * and future pictures can be decoded without error. Otherwise, - * see the descriptions of the other error codes in ::vpx_codec_err_t + * see the descriptions of the other error codes in ::aom_codec_err_t * for recoverability capabilities. */ -typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, +typedef aom_codec_err_t (*aom_codec_decode_fn_t)(aom_codec_alg_priv_t *ctx, const uint8_t *data, unsigned int data_sz, void *user_priv, @@ -206,8 +206,8 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, * complete when this function returns NULL. * * The list of available frames becomes valid upon completion of the - * vpx_codec_decode call, and remains valid until the next call to - * vpx_codec_decode. + * aom_codec_decode call, and remains valid until the next call to + * aom_codec_decode. * * \param[in] ctx Pointer to this instance's context * \param[in out] iter Iterator storage, initialized to NULL @@ -215,8 +215,8 @@ typedef vpx_codec_err_t (*vpx_codec_decode_fn_t)(vpx_codec_alg_priv_t *ctx, * \return Returns a pointer to an image, if one is ready for display. Frames * produced will always be in PTS (presentation time stamp) order. 
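At the application level, these internal decode and get-frame hooks are reached through the wrapper functions renamed elsewhere in this patch; a rough sketch of that loop follows (identifiers such as decode_one are placeholders, error handling trimmed):

    #include "aom/aom_decoder.h"

    /* Decode one compressed buffer, then drain the images it produced.
     * 'codec' is assumed to be an already-initialized decoder context. */
    static void decode_one(aom_codec_ctx_t *codec, const uint8_t *data,
                           unsigned int data_sz) {
      aom_codec_iter_t iter = NULL;
      aom_image_t *img;

      if (aom_codec_decode(codec, data, data_sz, NULL /* user_priv */,
                           0 /* deadline */) != VPX_CODEC_OK) {
        /* inspect aom_codec_error_detail(codec) */
        return;
      }
      /* Frames come back in PTS order and remain valid until the next
       * aom_codec_decode() call. */
      while ((img = aom_codec_get_frame(codec, &iter)) != NULL) {
        /* consume img */
        (void)img;
      }
    }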
*/ -typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx, - vpx_codec_iter_t *iter); +typedef aom_image_t *(*aom_codec_get_frame_fn_t)(aom_codec_alg_priv_t *ctx, + aom_codec_iter_t *iter); /*!\brief Pass in external frame buffers for the decoder to use. * @@ -244,90 +244,90 @@ typedef vpx_image_t *(*vpx_codec_get_frame_fn_t)(vpx_codec_alg_priv_t *ctx, * #VPX_MAXIMUM_WORK_BUFFERS external frame * buffers. */ -typedef vpx_codec_err_t (*vpx_codec_set_fb_fn_t)( - vpx_codec_alg_priv_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get, - vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); +typedef aom_codec_err_t (*aom_codec_set_fb_fn_t)( + aom_codec_alg_priv_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get, + aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv); -typedef vpx_codec_err_t (*vpx_codec_encode_fn_t)(vpx_codec_alg_priv_t *ctx, - const vpx_image_t *img, - vpx_codec_pts_t pts, +typedef aom_codec_err_t (*aom_codec_encode_fn_t)(aom_codec_alg_priv_t *ctx, + const aom_image_t *img, + aom_codec_pts_t pts, unsigned long duration, - vpx_enc_frame_flags_t flags, + aom_enc_frame_flags_t flags, unsigned long deadline); -typedef const vpx_codec_cx_pkt_t *(*vpx_codec_get_cx_data_fn_t)( - vpx_codec_alg_priv_t *ctx, vpx_codec_iter_t *iter); +typedef const aom_codec_cx_pkt_t *(*aom_codec_get_cx_data_fn_t)( + aom_codec_alg_priv_t *ctx, aom_codec_iter_t *iter); -typedef vpx_codec_err_t (*vpx_codec_enc_config_set_fn_t)( - vpx_codec_alg_priv_t *ctx, const vpx_codec_enc_cfg_t *cfg); -typedef vpx_fixed_buf_t *(*vpx_codec_get_global_headers_fn_t)( - vpx_codec_alg_priv_t *ctx); +typedef aom_codec_err_t (*aom_codec_enc_config_set_fn_t)( + aom_codec_alg_priv_t *ctx, const aom_codec_enc_cfg_t *cfg); +typedef aom_fixed_buf_t *(*aom_codec_get_global_headers_fn_t)( + aom_codec_alg_priv_t *ctx); -typedef vpx_image_t *(*vpx_codec_get_preview_frame_fn_t)( - vpx_codec_alg_priv_t *ctx); +typedef aom_image_t *(*aom_codec_get_preview_frame_fn_t)( + aom_codec_alg_priv_t *ctx); -typedef vpx_codec_err_t (*vpx_codec_enc_mr_get_mem_loc_fn_t)( - const vpx_codec_enc_cfg_t *cfg, void **mem_loc); +typedef aom_codec_err_t (*aom_codec_enc_mr_get_mem_loc_fn_t)( + const aom_codec_enc_cfg_t *cfg, void **mem_loc); /*!\brief usage configuration mapping * * This structure stores the mapping between usage identifiers and * configuration structures. Each algorithm provides a list of these - * mappings. This list is searched by the vpx_codec_enc_config_default() + * mappings. This list is searched by the aom_codec_enc_config_default() * wrapper function to determine which config to return. The special value * {-1, {0}} is used to indicate end-of-list, and must be present. At least * one mapping must be present, in addition to the end-of-list. * */ -typedef const struct vpx_codec_enc_cfg_map { +typedef const struct aom_codec_enc_cfg_map { int usage; - vpx_codec_enc_cfg_t cfg; -} vpx_codec_enc_cfg_map_t; + aom_codec_enc_cfg_t cfg; +} aom_codec_enc_cfg_map_t; /*!\brief Decoder algorithm interface interface * * All decoders \ref MUST expose a variable of this type. 
*/ -struct vpx_codec_iface { +struct aom_codec_iface { const char *name; /**< Identification String */ int abi_version; /**< Implemented ABI version */ - vpx_codec_caps_t caps; /**< Decoder capabilities */ - vpx_codec_init_fn_t init; /**< \copydoc ::vpx_codec_init_fn_t */ - vpx_codec_destroy_fn_t destroy; /**< \copydoc ::vpx_codec_destroy_fn_t */ - vpx_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::vpx_codec_ctrl_fn_map_t */ - struct vpx_codec_dec_iface { - vpx_codec_peek_si_fn_t peek_si; /**< \copydoc ::vpx_codec_peek_si_fn_t */ - vpx_codec_get_si_fn_t get_si; /**< \copydoc ::vpx_codec_get_si_fn_t */ - vpx_codec_decode_fn_t decode; /**< \copydoc ::vpx_codec_decode_fn_t */ - vpx_codec_get_frame_fn_t - get_frame; /**< \copydoc ::vpx_codec_get_frame_fn_t */ - vpx_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::vpx_codec_set_fb_fn_t */ + aom_codec_caps_t caps; /**< Decoder capabilities */ + aom_codec_init_fn_t init; /**< \copydoc ::aom_codec_init_fn_t */ + aom_codec_destroy_fn_t destroy; /**< \copydoc ::aom_codec_destroy_fn_t */ + aom_codec_ctrl_fn_map_t *ctrl_maps; /**< \copydoc ::aom_codec_ctrl_fn_map_t */ + struct aom_codec_dec_iface { + aom_codec_peek_si_fn_t peek_si; /**< \copydoc ::aom_codec_peek_si_fn_t */ + aom_codec_get_si_fn_t get_si; /**< \copydoc ::aom_codec_get_si_fn_t */ + aom_codec_decode_fn_t decode; /**< \copydoc ::aom_codec_decode_fn_t */ + aom_codec_get_frame_fn_t + get_frame; /**< \copydoc ::aom_codec_get_frame_fn_t */ + aom_codec_set_fb_fn_t set_fb_fn; /**< \copydoc ::aom_codec_set_fb_fn_t */ } dec; - struct vpx_codec_enc_iface { + struct aom_codec_enc_iface { int cfg_map_count; - vpx_codec_enc_cfg_map_t - *cfg_maps; /**< \copydoc ::vpx_codec_enc_cfg_map_t */ - vpx_codec_encode_fn_t encode; /**< \copydoc ::vpx_codec_encode_fn_t */ - vpx_codec_get_cx_data_fn_t - get_cx_data; /**< \copydoc ::vpx_codec_get_cx_data_fn_t */ - vpx_codec_enc_config_set_fn_t - cfg_set; /**< \copydoc ::vpx_codec_enc_config_set_fn_t */ - vpx_codec_get_global_headers_fn_t - get_glob_hdrs; /**< \copydoc ::vpx_codec_get_global_headers_fn_t */ - vpx_codec_get_preview_frame_fn_t - get_preview; /**< \copydoc ::vpx_codec_get_preview_frame_fn_t */ - vpx_codec_enc_mr_get_mem_loc_fn_t - mr_get_mem_loc; /**< \copydoc ::vpx_codec_enc_mr_get_mem_loc_fn_t */ + aom_codec_enc_cfg_map_t + *cfg_maps; /**< \copydoc ::aom_codec_enc_cfg_map_t */ + aom_codec_encode_fn_t encode; /**< \copydoc ::aom_codec_encode_fn_t */ + aom_codec_get_cx_data_fn_t + get_cx_data; /**< \copydoc ::aom_codec_get_cx_data_fn_t */ + aom_codec_enc_config_set_fn_t + cfg_set; /**< \copydoc ::aom_codec_enc_config_set_fn_t */ + aom_codec_get_global_headers_fn_t + get_glob_hdrs; /**< \copydoc ::aom_codec_get_global_headers_fn_t */ + aom_codec_get_preview_frame_fn_t + get_preview; /**< \copydoc ::aom_codec_get_preview_frame_fn_t */ + aom_codec_enc_mr_get_mem_loc_fn_t + mr_get_mem_loc; /**< \copydoc ::aom_codec_enc_mr_get_mem_loc_fn_t */ } enc; }; /*!\brief Callback function pointer / user data pair storage */ -typedef struct vpx_codec_priv_cb_pair { +typedef struct aom_codec_priv_cb_pair { union { - vpx_codec_put_frame_cb_fn_t put_frame; - vpx_codec_put_slice_cb_fn_t put_slice; + aom_codec_put_frame_cb_fn_t put_frame; + aom_codec_put_slice_cb_fn_t put_slice; } u; void *user_priv; -} vpx_codec_priv_cb_pair_t; +} aom_codec_priv_cb_pair_t; /*!\brief Instance private storage * @@ -337,18 +337,18 @@ typedef struct vpx_codec_priv_cb_pair { * structure can be made the first member of the algorithm specific structure, * and the pointer cast to the proper type. 
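A short sketch of the layout convention just described, using a hypothetical algorithm struct (the cast mirrors the get_alg_priv() helpers that appear later in this patch; example_speed is an invented field):

    #include "aom/internal/aom_codec_internal.h"

    /* The shared aom_codec_priv sits first in the algorithm-private struct,
     * so the base pointer held by the context can be cast to the derived
     * type. */
    struct aom_codec_alg_priv {
      struct aom_codec_priv base;  /* must be the first member */
      int example_speed;           /* hypothetical algorithm-specific state */
    };

    static aom_codec_alg_priv_t *example_alg_priv(aom_codec_ctx_t *ctx) {
      return (aom_codec_alg_priv_t *)ctx->priv;
    }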
*/ -struct vpx_codec_priv { +struct aom_codec_priv { const char *err_detail; - vpx_codec_flags_t init_flags; + aom_codec_flags_t init_flags; struct { - vpx_codec_priv_cb_pair_t put_frame_cb; - vpx_codec_priv_cb_pair_t put_slice_cb; + aom_codec_priv_cb_pair_t put_frame_cb; + aom_codec_priv_cb_pair_t put_slice_cb; } dec; struct { - vpx_fixed_buf_t cx_data_dst_buf; + aom_fixed_buf_t cx_data_dst_buf; unsigned int cx_data_pad_before; unsigned int cx_data_pad_after; - vpx_codec_cx_pkt_t cx_data_pkt; + aom_codec_cx_pkt_t cx_data_pkt; unsigned int total_encoders; } enc; }; @@ -356,10 +356,10 @@ struct vpx_codec_priv { /* * Multi-resolution encoding internal configuration */ -struct vpx_codec_priv_enc_mr_cfg { +struct aom_codec_priv_enc_mr_cfg { unsigned int mr_total_resolutions; unsigned int mr_encoder_id; - struct vpx_rational mr_down_sampling_factor; + struct aom_rational mr_down_sampling_factor; void *mr_low_res_mode_info; }; @@ -383,44 +383,44 @@ struct vpx_codec_priv_enc_mr_cfg { * macro is provided to define this getter function automatically. */ #define CODEC_INTERFACE(id) \ - vpx_codec_iface_t *id(void) { return &id##_algo; } \ - vpx_codec_iface_t id##_algo + aom_codec_iface_t *id(void) { return &id##_algo; } \ + aom_codec_iface_t id##_algo /* Internal Utility Functions * * The following functions are intended to be used inside algorithms as - * utilities for manipulating vpx_codec_* data structures. + * utilities for manipulating aom_codec_* data structures. */ -struct vpx_codec_pkt_list { +struct aom_codec_pkt_list { unsigned int cnt; unsigned int max; - struct vpx_codec_cx_pkt pkts[1]; + struct aom_codec_cx_pkt pkts[1]; }; -#define vpx_codec_pkt_list_decl(n) \ +#define aom_codec_pkt_list_decl(n) \ union { \ - struct vpx_codec_pkt_list head; \ + struct aom_codec_pkt_list head; \ struct { \ - struct vpx_codec_pkt_list head; \ - struct vpx_codec_cx_pkt pkts[n]; \ + struct aom_codec_pkt_list head; \ + struct aom_codec_cx_pkt pkts[n]; \ } alloc; \ } -#define vpx_codec_pkt_list_init(m) \ +#define aom_codec_pkt_list_init(m) \ (m)->alloc.head.cnt = 0, \ (m)->alloc.head.max = sizeof((m)->alloc.pkts) / sizeof((m)->alloc.pkts[0]) -int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *, - const struct vpx_codec_cx_pkt *); +int aom_codec_pkt_list_add(struct aom_codec_pkt_list *, + const struct aom_codec_cx_pkt *); -const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get( - struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter); +const aom_codec_cx_pkt_t *aom_codec_pkt_list_get( + struct aom_codec_pkt_list *list, aom_codec_iter_t *iter); #include #include -struct vpx_internal_error_info { - vpx_codec_err_t error_code; +struct aom_internal_error_info { + aom_codec_err_t error_code; int has_detail; char detail[80]; int setjmp; @@ -435,8 +435,8 @@ struct vpx_internal_error_info { #endif #endif -void vpx_internal_error(struct vpx_internal_error_info *info, - vpx_codec_err_t error, const char *fmt, +void aom_internal_error(struct aom_internal_error_info *info, + aom_codec_err_t error, const char *fmt, ...) 
CLANG_ANALYZER_NORETURN; #ifdef __cplusplus diff --git a/aom/internal/vpx_psnr.h b/aom/internal/aom_psnr.h similarity index 88% rename from aom/internal/vpx_psnr.h rename to aom/internal/aom_psnr.h index 010ca9f477c0f04f22d72ece220bd110dffc555c..b42b55d86ddc42490557ac7509115fb2cf257e80 100644 --- a/aom/internal/vpx_psnr.h +++ b/aom/internal/aom_psnr.h @@ -16,7 +16,7 @@ extern "C" { #endif -// TODO(dkovalev) change vpx_sse_to_psnr signature: double -> int64_t +// TODO(dkovalev) change aom_sse_to_psnr signature: double -> int64_t /*!\brief Converts SSE to PSNR * @@ -26,7 +26,7 @@ extern "C" { * \param[in] peak Max sample value * \param[in] sse Sum of squared errors */ -double vpx_sse_to_psnr(double samples, double peak, double sse); +double aom_sse_to_psnr(double samples, double peak, double sse); #ifdef __cplusplus } // extern "C" diff --git a/aom/src/vpx_codec.c b/aom/src/aom_codec.c similarity index 70% rename from aom/src/vpx_codec.c rename to aom/src/aom_codec.c index 53f20148beeef951b27c17a207d4eebc7faf2252..119bd1d99073d023b6098654ac9d747200aa0dae 100644 --- a/aom/src/vpx_codec.c +++ b/aom/src/aom_codec.c @@ -15,23 +15,23 @@ */ #include #include -#include "aom/vpx_integer.h" -#include "aom/internal/vpx_codec_internal.h" -#include "vpx_version.h" +#include "aom/aom_integer.h" +#include "aom/internal/aom_codec_internal.h" +#include "aom_version.h" #define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var) -int vpx_codec_version(void) { return VERSION_PACKED; } +int aom_codec_version(void) { return VERSION_PACKED; } -const char *vpx_codec_version_str(void) { return VERSION_STRING_NOSP; } +const char *aom_codec_version_str(void) { return VERSION_STRING_NOSP; } -const char *vpx_codec_version_extra_str(void) { return VERSION_EXTRA; } +const char *aom_codec_version_extra_str(void) { return VERSION_EXTRA; } -const char *vpx_codec_iface_name(vpx_codec_iface_t *iface) { +const char *aom_codec_iface_name(aom_codec_iface_t *iface) { return iface ? iface->name : ""; } -const char *vpx_codec_err_to_string(vpx_codec_err_t err) { +const char *aom_codec_err_to_string(aom_codec_err_t err) { switch (err) { case VPX_CODEC_OK: return "Success"; case VPX_CODEC_ERROR: return "Unspecified internal error"; @@ -51,27 +51,27 @@ const char *vpx_codec_err_to_string(vpx_codec_err_t err) { return "Unrecognized error code"; } -const char *vpx_codec_error(vpx_codec_ctx_t *ctx) { - return (ctx) ? vpx_codec_err_to_string(ctx->err) - : vpx_codec_err_to_string(VPX_CODEC_INVALID_PARAM); +const char *aom_codec_error(aom_codec_ctx_t *ctx) { + return (ctx) ? aom_codec_err_to_string(ctx->err) + : aom_codec_err_to_string(VPX_CODEC_INVALID_PARAM); } -const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx) { +const char *aom_codec_error_detail(aom_codec_ctx_t *ctx) { if (ctx && ctx->err) return ctx->priv ? 
ctx->priv->err_detail : ctx->err_detail; return NULL; } -vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_destroy(aom_codec_ctx_t *ctx) { + aom_codec_err_t res; if (!ctx) res = VPX_CODEC_INVALID_PARAM; else if (!ctx->iface || !ctx->priv) res = VPX_CODEC_ERROR; else { - ctx->iface->destroy((vpx_codec_alg_priv_t *)ctx->priv); + ctx->iface->destroy((aom_codec_alg_priv_t *)ctx->priv); ctx->iface = NULL; ctx->name = NULL; @@ -82,19 +82,19 @@ vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx) { return SAVE_STATUS(ctx, res); } -vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface) { +aom_codec_caps_t aom_codec_get_caps(aom_codec_iface_t *iface) { return (iface) ? iface->caps : 0; } -vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_control_(aom_codec_ctx_t *ctx, int ctrl_id, ...) { + aom_codec_err_t res; if (!ctx || !ctrl_id) res = VPX_CODEC_INVALID_PARAM; else if (!ctx->iface || !ctx->priv || !ctx->iface->ctrl_maps) res = VPX_CODEC_ERROR; else { - vpx_codec_ctrl_fn_map_t *entry; + aom_codec_ctrl_fn_map_t *entry; res = VPX_CODEC_ERROR; @@ -103,7 +103,7 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...) { va_list ap; va_start(ap, ctrl_id); - res = entry->fn((vpx_codec_alg_priv_t *)ctx->priv, ap); + res = entry->fn((aom_codec_alg_priv_t *)ctx->priv, ap); va_end(ap); break; } @@ -113,8 +113,8 @@ vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx, int ctrl_id, ...) { return SAVE_STATUS(ctx, res); } -void vpx_internal_error(struct vpx_internal_error_info *info, - vpx_codec_err_t error, const char *fmt, ...) { +void aom_internal_error(struct aom_internal_error_info *info, + aom_codec_err_t error, const char *fmt, ...) { va_list ap; info->error_code = error; diff --git a/aom/src/vpx_decoder.c b/aom/src/aom_decoder.c similarity index 74% rename from aom/src/vpx_decoder.c rename to aom/src/aom_decoder.c index ba64a87255c4130c15df061c3fb9b9b5b325410a..8356ee922225f4bc68c0da9d25aed383d69947fe 100644 --- a/aom/src/vpx_decoder.c +++ b/aom/src/aom_decoder.c @@ -14,19 +14,19 @@ * */ #include -#include "aom/internal/vpx_codec_internal.h" +#include "aom/internal/aom_codec_internal.h" #define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var) -static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) { - return (vpx_codec_alg_priv_t *)ctx->priv; +static aom_codec_alg_priv_t *get_alg_priv(aom_codec_ctx_t *ctx) { + return (aom_codec_alg_priv_t *)ctx->priv; } -vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - const vpx_codec_dec_cfg_t *cfg, - vpx_codec_flags_t flags, int ver) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_dec_init_ver(aom_codec_ctx_t *ctx, + aom_codec_iface_t *iface, + const aom_codec_dec_cfg_t *cfg, + aom_codec_flags_t flags, int ver) { + aom_codec_err_t res; if (ver != VPX_DECODER_ABI_VERSION) res = VPX_CODEC_ABI_MISMATCH; @@ -56,21 +56,21 @@ vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx, res = ctx->iface->init(ctx, NULL); if (res) { ctx->err_detail = ctx->priv ? 
ctx->priv->err_detail : NULL; - vpx_codec_destroy(ctx); + aom_codec_destroy(ctx); } } return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, +aom_codec_err_t aom_codec_peek_stream_info(aom_codec_iface_t *iface, const uint8_t *data, unsigned int data_sz, - vpx_codec_stream_info_t *si) { - vpx_codec_err_t res; + aom_codec_stream_info_t *si) { + aom_codec_err_t res; if (!iface || !data || !data_sz || !si || - si->sz < sizeof(vpx_codec_stream_info_t)) + si->sz < sizeof(aom_codec_stream_info_t)) res = VPX_CODEC_INVALID_PARAM; else { /* Set default/unknown values */ @@ -83,11 +83,11 @@ vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface, return res; } -vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, - vpx_codec_stream_info_t *si) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_get_stream_info(aom_codec_ctx_t *ctx, + aom_codec_stream_info_t *si) { + aom_codec_err_t res; - if (!ctx || !si || si->sz < sizeof(vpx_codec_stream_info_t)) + if (!ctx || !si || si->sz < sizeof(aom_codec_stream_info_t)) res = VPX_CODEC_INVALID_PARAM; else if (!ctx->iface || !ctx->priv) res = VPX_CODEC_ERROR; @@ -102,10 +102,10 @@ vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, +aom_codec_err_t aom_codec_decode(aom_codec_ctx_t *ctx, const uint8_t *data, unsigned int data_sz, void *user_priv, long deadline) { - vpx_codec_err_t res; + aom_codec_err_t res; /* Sanity checks */ /* NULL data ptr allowed if data_sz is 0 too */ @@ -121,8 +121,8 @@ vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data, return SAVE_STATUS(ctx, res); } -vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter) { - vpx_image_t *img; +aom_image_t *aom_codec_get_frame(aom_codec_ctx_t *ctx, aom_codec_iter_t *iter) { + aom_image_t *img; if (!ctx || !iter || !ctx->iface || !ctx->priv) img = NULL; @@ -132,10 +132,10 @@ vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, vpx_codec_iter_t *iter) { return img; } -vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_frame_cb_fn_t cb, +aom_codec_err_t aom_codec_register_put_frame_cb(aom_codec_ctx_t *ctx, + aom_codec_put_frame_cb_fn_t cb, void *user_priv) { - vpx_codec_err_t res; + aom_codec_err_t res; if (!ctx || !cb) res = VPX_CODEC_INVALID_PARAM; @@ -151,10 +151,10 @@ vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, - vpx_codec_put_slice_cb_fn_t cb, +aom_codec_err_t aom_codec_register_put_slice_cb(aom_codec_ctx_t *ctx, + aom_codec_put_slice_cb_fn_t cb, void *user_priv) { - vpx_codec_err_t res; + aom_codec_err_t res; if (!ctx || !cb) res = VPX_CODEC_INVALID_PARAM; @@ -170,10 +170,10 @@ vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_set_frame_buffer_functions( - vpx_codec_ctx_t *ctx, vpx_get_frame_buffer_cb_fn_t cb_get, - vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_set_frame_buffer_functions( + aom_codec_ctx_t *ctx, aom_get_frame_buffer_cb_fn_t cb_get, + aom_release_frame_buffer_cb_fn_t cb_release, void *cb_priv) { + aom_codec_err_t res; if (!ctx || !cb_get || !cb_release) { res = VPX_CODEC_INVALID_PARAM; diff --git a/aom/src/vpx_encoder.c b/aom/src/aom_encoder.c 
similarity index 80% rename from aom/src/vpx_encoder.c rename to aom/src/aom_encoder.c index addc0007985f2a696164b1b51812defb74e04022..5ac56cc3899dca40cad475955166bcae7600a04a 100644 --- a/aom/src/vpx_encoder.c +++ b/aom/src/aom_encoder.c @@ -15,20 +15,20 @@ */ #include #include -#include "vpx_config.h" -#include "aom/internal/vpx_codec_internal.h" +#include "aom_config.h" +#include "aom/internal/aom_codec_internal.h" #define SAVE_STATUS(ctx, var) (ctx ? (ctx->err = var) : var) -static vpx_codec_alg_priv_t *get_alg_priv(vpx_codec_ctx_t *ctx) { - return (vpx_codec_alg_priv_t *)ctx->priv; +static aom_codec_alg_priv_t *get_alg_priv(aom_codec_ctx_t *ctx) { + return (aom_codec_alg_priv_t *)ctx->priv; } -vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, - vpx_codec_iface_t *iface, - const vpx_codec_enc_cfg_t *cfg, - vpx_codec_flags_t flags, int ver) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_enc_init_ver(aom_codec_ctx_t *ctx, + aom_codec_iface_t *iface, + const aom_codec_enc_cfg_t *cfg, + aom_codec_flags_t flags, int ver) { + aom_codec_err_t res; if (ver != VPX_ENCODER_ABI_VERSION) res = VPX_CODEC_ABI_MISMATCH; @@ -53,17 +53,17 @@ vpx_codec_err_t vpx_codec_enc_init_ver(vpx_codec_ctx_t *ctx, if (res) { ctx->err_detail = ctx->priv ? ctx->priv->err_detail : NULL; - vpx_codec_destroy(ctx); + aom_codec_destroy(ctx); } } return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_enc_init_multi_ver( - vpx_codec_ctx_t *ctx, vpx_codec_iface_t *iface, vpx_codec_enc_cfg_t *cfg, - int num_enc, vpx_codec_flags_t flags, vpx_rational_t *dsf, int ver) { - vpx_codec_err_t res = VPX_CODEC_OK; +aom_codec_err_t aom_codec_enc_init_multi_ver( + aom_codec_ctx_t *ctx, aom_codec_iface_t *iface, aom_codec_enc_cfg_t *cfg, + int num_enc, aom_codec_flags_t flags, aom_rational_t *dsf, int ver) { + aom_codec_err_t res = VPX_CODEC_OK; if (ver != VPX_ENCODER_ABI_VERSION) res = VPX_CODEC_ABI_MISMATCH; @@ -84,7 +84,7 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver( if (!(res = iface->enc.mr_get_mem_loc(cfg, &mem_loc))) { for (i = 0; i < num_enc; i++) { - vpx_codec_priv_enc_mr_cfg_t mr_cfg; + aom_codec_priv_enc_mr_cfg_t mr_cfg; /* Validate down-sampling factor. */ if (dsf->num < 1 || dsf->num > 4096 || dsf->den < 1 || @@ -116,13 +116,13 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver( const char *error_detail = ctx->priv ? 
ctx->priv->err_detail : NULL; /* Destroy current ctx */ ctx->err_detail = error_detail; - vpx_codec_destroy(ctx); + aom_codec_destroy(ctx); /* Destroy already allocated high-level ctx */ while (i) { ctx--; ctx->err_detail = error_detail; - vpx_codec_destroy(ctx); + aom_codec_destroy(ctx); i--; } } @@ -140,11 +140,11 @@ vpx_codec_err_t vpx_codec_enc_init_multi_ver( return SAVE_STATUS(ctx, res); } -vpx_codec_err_t vpx_codec_enc_config_default(vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg, +aom_codec_err_t aom_codec_enc_config_default(aom_codec_iface_t *iface, + aom_codec_enc_cfg_t *cfg, unsigned int usage) { - vpx_codec_err_t res; - vpx_codec_enc_cfg_map_t *map; + aom_codec_err_t res; + aom_codec_enc_cfg_map_t *map; int i; if (!iface || !cfg || usage > INT_MAX) @@ -186,11 +186,11 @@ static void FLOATING_POINT_INIT() {} static void FLOATING_POINT_RESTORE() {} #endif -vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, - vpx_codec_pts_t pts, unsigned long duration, - vpx_enc_frame_flags_t flags, +aom_codec_err_t aom_codec_encode(aom_codec_ctx_t *ctx, const aom_image_t *img, + aom_codec_pts_t pts, unsigned long duration, + aom_enc_frame_flags_t flags, unsigned long deadline) { - vpx_codec_err_t res = VPX_CODEC_OK; + aom_codec_err_t res = VPX_CODEC_OK; if (!ctx || (img && !duration)) res = VPX_CODEC_INVALID_PARAM; @@ -237,9 +237,9 @@ vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img, return SAVE_STATUS(ctx, res); } -const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, - vpx_codec_iter_t *iter) { - const vpx_codec_cx_pkt_t *pkt = NULL; +const aom_codec_cx_pkt_t *aom_codec_get_cx_data(aom_codec_ctx_t *ctx, + aom_codec_iter_t *iter) { + const aom_codec_cx_pkt_t *pkt = NULL; if (ctx) { if (!iter) @@ -256,14 +256,14 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, // If the application has specified a destination area for the // compressed data, and the codec has not placed the data there, // and it fits, copy it. 
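For context, the caller side of this packet iteration looks roughly as follows (a sketch; the VPX_CODEC_CX_FRAME_PKT kind check follows common libvpx usage, and the zero flags/deadline values are chosen for brevity):

    #include "aom/aom_encoder.h"

    /* Encode one raw image, then drain the compressed-data packets. */
    static void encode_one(aom_codec_ctx_t *codec, const aom_image_t *raw,
                           aom_codec_pts_t pts, unsigned long duration) {
      aom_codec_iter_t iter = NULL;
      const aom_codec_cx_pkt_t *pkt;

      if (aom_codec_encode(codec, raw, pts, duration, 0 /* flags */,
                           0 /* deadline */) != VPX_CODEC_OK) {
        return; /* inspect aom_codec_error_detail(codec) */
      }
      while ((pkt = aom_codec_get_cx_data(codec, &iter)) != NULL) {
        if (pkt->kind == VPX_CODEC_CX_FRAME_PKT) {
          /* write pkt->data.raw.buf, pkt->data.raw.sz to the output stream */
        }
      }
    }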
- vpx_codec_priv_t *const priv = ctx->priv; + aom_codec_priv_t *const priv = ctx->priv; char *const dst_buf = (char *)priv->enc.cx_data_dst_buf.buf; if (dst_buf && pkt->data.raw.buf != dst_buf && pkt->data.raw.sz + priv->enc.cx_data_pad_before + priv->enc.cx_data_pad_after <= priv->enc.cx_data_dst_buf.sz) { - vpx_codec_cx_pkt_t *modified_pkt = &priv->enc.cx_data_pkt; + aom_codec_cx_pkt_t *modified_pkt = &priv->enc.cx_data_pkt; memcpy(dst_buf + priv->enc.cx_data_pad_before, pkt->data.raw.buf, pkt->data.raw.sz); @@ -283,8 +283,8 @@ const vpx_codec_cx_pkt_t *vpx_codec_get_cx_data(vpx_codec_ctx_t *ctx, return pkt; } -vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, - const vpx_fixed_buf_t *buf, +aom_codec_err_t aom_codec_set_cx_data_buf(aom_codec_ctx_t *ctx, + const aom_fixed_buf_t *buf, unsigned int pad_before, unsigned int pad_after) { if (!ctx || !ctx->priv) return VPX_CODEC_INVALID_PARAM; @@ -303,8 +303,8 @@ vpx_codec_err_t vpx_codec_set_cx_data_buf(vpx_codec_ctx_t *ctx, return VPX_CODEC_OK; } -const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) { - vpx_image_t *img = NULL; +const aom_image_t *aom_codec_get_preview_frame(aom_codec_ctx_t *ctx) { + aom_image_t *img = NULL; if (ctx) { if (!ctx->iface || !ctx->priv) @@ -320,8 +320,8 @@ const vpx_image_t *vpx_codec_get_preview_frame(vpx_codec_ctx_t *ctx) { return img; } -vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) { - vpx_fixed_buf_t *buf = NULL; +aom_fixed_buf_t *aom_codec_get_global_headers(aom_codec_ctx_t *ctx) { + aom_fixed_buf_t *buf = NULL; if (ctx) { if (!ctx->iface || !ctx->priv) @@ -337,9 +337,9 @@ vpx_fixed_buf_t *vpx_codec_get_global_headers(vpx_codec_ctx_t *ctx) { return buf; } -vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, - const vpx_codec_enc_cfg_t *cfg) { - vpx_codec_err_t res; +aom_codec_err_t aom_codec_enc_config_set(aom_codec_ctx_t *ctx, + const aom_codec_enc_cfg_t *cfg) { + aom_codec_err_t res; if (!ctx || !ctx->iface || !ctx->priv || !cfg) res = VPX_CODEC_INVALID_PARAM; @@ -351,8 +351,8 @@ vpx_codec_err_t vpx_codec_enc_config_set(vpx_codec_ctx_t *ctx, return SAVE_STATUS(ctx, res); } -int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list, - const struct vpx_codec_cx_pkt *pkt) { +int aom_codec_pkt_list_add(struct aom_codec_pkt_list *list, + const struct aom_codec_cx_pkt *pkt) { if (list->cnt < list->max) { list->pkts[list->cnt++] = *pkt; return 0; @@ -361,15 +361,15 @@ int vpx_codec_pkt_list_add(struct vpx_codec_pkt_list *list, return 1; } -const vpx_codec_cx_pkt_t *vpx_codec_pkt_list_get( - struct vpx_codec_pkt_list *list, vpx_codec_iter_t *iter) { - const vpx_codec_cx_pkt_t *pkt; +const aom_codec_cx_pkt_t *aom_codec_pkt_list_get( + struct aom_codec_pkt_list *list, aom_codec_iter_t *iter) { + const aom_codec_cx_pkt_t *pkt; if (!(*iter)) { *iter = list->pkts; } - pkt = (const vpx_codec_cx_pkt_t *)*iter; + pkt = (const aom_codec_cx_pkt_t *)*iter; if ((size_t)(pkt - list->pkts) < list->cnt) *iter = pkt + 1; diff --git a/aom/src/vpx_image.c b/aom/src/aom_image.c similarity index 91% rename from aom/src/vpx_image.c rename to aom/src/aom_image.c index a6d7674ad592fb2f08fd8b0f23b7d3783b3cac4e..5ef51d12d84ac1869771b3680031b21e92f9bb60 100644 --- a/aom/src/vpx_image.c +++ b/aom/src/aom_image.c @@ -12,11 +12,11 @@ #include #include -#include "aom/vpx_image.h" -#include "aom/vpx_integer.h" -#include "aom_mem/vpx_mem.h" +#include "aom/aom_image.h" +#include "aom/aom_integer.h" +#include "aom_mem/aom_mem.h" -static vpx_image_t *img_alloc_helper(vpx_image_t 
*img, vpx_img_fmt_t fmt, +static aom_image_t *img_alloc_helper(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w, unsigned int d_h, unsigned int buf_align, unsigned int stride_align, @@ -100,13 +100,13 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt, /* Allocate the new image */ if (!img) { - img = (vpx_image_t *)calloc(1, sizeof(vpx_image_t)); + img = (aom_image_t *)calloc(1, sizeof(aom_image_t)); if (!img) goto fail; img->self_allocd = 1; } else { - memset(img, 0, sizeof(vpx_image_t)); + memset(img, 0, sizeof(aom_image_t)); } img->img_data = img_data; @@ -118,7 +118,7 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt, if (alloc_size != (size_t)alloc_size) goto fail; - img->img_data = (uint8_t *)vpx_memalign(buf_align, (size_t)alloc_size); + img->img_data = (uint8_t *)aom_memalign(buf_align, (size_t)alloc_size); img->img_data_owner = 1; } @@ -137,20 +137,20 @@ static vpx_image_t *img_alloc_helper(vpx_image_t *img, vpx_img_fmt_t fmt, img->stride[VPX_PLANE_U] = img->stride[VPX_PLANE_V] = stride_in_bytes >> xcs; /* Default viewport to entire image */ - if (!vpx_img_set_rect(img, 0, 0, d_w, d_h)) return img; + if (!aom_img_set_rect(img, 0, 0, d_w, d_h)) return img; fail: - vpx_img_free(img); + aom_img_free(img); return NULL; } -vpx_image_t *vpx_img_alloc(vpx_image_t *img, vpx_img_fmt_t fmt, +aom_image_t *aom_img_alloc(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w, unsigned int d_h, unsigned int align) { return img_alloc_helper(img, fmt, d_w, d_h, align, align, NULL); } -vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w, +aom_image_t *aom_img_wrap(aom_image_t *img, aom_img_fmt_t fmt, unsigned int d_w, unsigned int d_h, unsigned int stride_align, unsigned char *img_data) { /* By setting buf_align = 1, we don't change buffer alignment in this @@ -158,7 +158,7 @@ vpx_image_t *vpx_img_wrap(vpx_image_t *img, vpx_img_fmt_t fmt, unsigned int d_w, return img_alloc_helper(img, fmt, d_w, d_h, 1, stride_align, img_data); } -int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y, +int aom_img_set_rect(aom_image_t *img, unsigned int x, unsigned int y, unsigned int w, unsigned int h) { unsigned char *data; @@ -208,7 +208,7 @@ int vpx_img_set_rect(vpx_image_t *img, unsigned int x, unsigned int y, return -1; } -void vpx_img_flip(vpx_image_t *img) { +void aom_img_flip(aom_image_t *img) { /* Note: In the calculation pointer adjustment calculation, we want the * rhs to be promoted to a signed type. 
Section 6.3.1.8 of the ISO C99 * standard indicates that if the adjustment parameter is unsigned, the @@ -231,9 +231,9 @@ void vpx_img_flip(vpx_image_t *img) { img->stride[VPX_PLANE_ALPHA] = -img->stride[VPX_PLANE_ALPHA]; } -void vpx_img_free(vpx_image_t *img) { +void aom_img_free(aom_image_t *img) { if (img) { - if (img->img_data && img->img_data_owner) vpx_free(img->img_data); + if (img->img_data && img->img_data_owner) aom_free(img->img_data); if (img->self_allocd) free(img); } diff --git a/aom/src/vpx_psnr.c b/aom/src/aom_psnr.c similarity index 88% rename from aom/src/vpx_psnr.c rename to aom/src/aom_psnr.c index b3a1d7bd00db1803e9727d01656c7cb1aa2d4b0a..2049921fd23049bea6d89f3724dd8bfaa7db1b83 100644 --- a/aom/src/vpx_psnr.c +++ b/aom/src/aom_psnr.c @@ -11,11 +11,11 @@ #include -#include "aom/internal/vpx_psnr.h" +#include "aom/internal/aom_psnr.h" #define MAX_PSNR 100.0 -double vpx_sse_to_psnr(double samples, double peak, double sse) { +double aom_sse_to_psnr(double samples, double peak, double sse) { if (sse > 0.0) { const double psnr = 10.0 * log10(samples * peak * peak / sse); return psnr > MAX_PSNR ? MAX_PSNR : psnr; diff --git a/aom/src/svc_encodeframe.c b/aom/src/svc_encodeframe.c index 38d73b2bd597658782154b93006bd7e5e97b8b90..0d2637a61e6191f6c717b86eefe458179afbe2f4 100644 --- a/aom/src/svc_encodeframe.c +++ b/aom/src/svc_encodeframe.c @@ -22,11 +22,11 @@ #include #include #define VPX_DISABLE_CTRL_TYPECHECKS 1 -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom/svc_context.h" #include "aom/vp8cx.h" -#include "aom/vpx_encoder.h" -#include "aom_mem/vpx_mem.h" +#include "aom/aom_encoder.h" +#include "aom_mem/aom_mem.h" #include "av1/common/onyxc_int.h" #ifdef __MINGW32__ @@ -71,7 +71,7 @@ static const int option_min_values[ALL_OPTION_TYPES] = { 0, 0, 1, 0 }; typedef struct FrameData { void *buf; // compressed data buffer size_t size; // length of compressed data - vpx_codec_frame_flags_t flags; /**< flags for this frame */ + aom_codec_frame_flags_t flags; /**< flags for this frame */ struct FrameData *next; } FrameData; @@ -125,7 +125,7 @@ static int svc_log(SvcContext *svc_ctx, SVC_LOG_LEVEL level, const char *fmt, return retval; } -static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type, char *input, +static aom_codec_err_t extract_option(LAYER_OPTION_TYPE type, char *input, int *value0, int *value1) { if (type == SCALE_FACTOR) { *value0 = strtol(input, &input, 10); @@ -146,13 +146,13 @@ static vpx_codec_err_t extract_option(LAYER_OPTION_TYPE type, char *input, return VPX_CODEC_OK; } -static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx, +static aom_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx, LAYER_OPTION_TYPE type, const char *input, int *option0, int *option1) { int i; - vpx_codec_err_t res = VPX_CODEC_OK; + aom_codec_err_t res = VPX_CODEC_OK; char *input_string; char *token; const char *delim = ","; @@ -191,13 +191,13 @@ static vpx_codec_err_t parse_layer_options_from_string(SvcContext *svc_ctx, * quantizers=,,... 
* svc_mode = [i|ip|alt_ip|gf] */ -static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) { +static aom_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) { char *input_string; char *option_name; char *option_value; char *input_ptr; SvcInternal_t *const si = get_svc_internal(svc_ctx); - vpx_codec_err_t res = VPX_CODEC_OK; + aom_codec_err_t res = VPX_CODEC_OK; int i, alt_ref_enabled = 0; if (options == NULL) return VPX_CODEC_OK; @@ -278,7 +278,7 @@ static vpx_codec_err_t parse_options(SvcContext *svc_ctx, const char *options) { return res; } -vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options) { +aom_codec_err_t aom_svc_set_options(SvcContext *svc_ctx, const char *options) { SvcInternal_t *const si = get_svc_internal(svc_ctx); if (svc_ctx == NULL || options == NULL || si == NULL) { return VPX_CODEC_INVALID_PARAM; @@ -289,7 +289,7 @@ vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options) { } void assign_layer_bitrates(const SvcContext *svc_ctx, - vpx_codec_enc_cfg_t *const enc_cfg) { + aom_codec_enc_cfg_t *const enc_cfg) { int i; const SvcInternal_t *const si = get_const_svc_internal(svc_ctx); int sl, tl, spatial_layer_target; @@ -373,10 +373,10 @@ void assign_layer_bitrates(const SvcContext *svc_ctx, } } -vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, - vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *enc_cfg) { - vpx_codec_err_t res; +aom_codec_err_t aom_svc_init(SvcContext *svc_ctx, aom_codec_ctx_t *codec_ctx, + aom_codec_iface_t *iface, + aom_codec_enc_cfg_t *enc_cfg) { + aom_codec_err_t res; int i, sl, tl; SvcInternal_t *const si = get_svc_internal(svc_ctx); if (svc_ctx == NULL || codec_ctx == NULL || iface == NULL || @@ -486,14 +486,14 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, enc_cfg->g_error_resilient = 1; // Initialize codec - res = vpx_codec_enc_init(codec_ctx, iface, enc_cfg, VPX_CODEC_USE_PSNR); + res = aom_codec_enc_init(codec_ctx, iface, enc_cfg, VPX_CODEC_USE_PSNR); if (res != VPX_CODEC_OK) { svc_log(svc_ctx, SVC_LOG_ERROR, "svc_enc_init error\n"); return res; } if (svc_ctx->spatial_layers > 1 || svc_ctx->temporal_layers > 1) { - vpx_codec_control(codec_ctx, VP9E_SET_SVC, 1); - vpx_codec_control(codec_ctx, VP9E_SET_SVC_PARAMETERS, &si->svc_params); + aom_codec_control(codec_ctx, VP9E_SET_SVC, 1); + aom_codec_control(codec_ctx, VP9E_SET_SVC_PARAMETERS, &si->svc_params); } return VPX_CODEC_OK; } @@ -502,12 +502,12 @@ vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, * Encode a frame into multiple layers * Create a superframe containing the individual layers */ -vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, - struct vpx_image *rawimg, vpx_codec_pts_t pts, +aom_codec_err_t aom_svc_encode(SvcContext *svc_ctx, aom_codec_ctx_t *codec_ctx, + struct aom_image *rawimg, aom_codec_pts_t pts, int64_t duration, int deadline) { - vpx_codec_err_t res; - vpx_codec_iter_t iter; - const vpx_codec_cx_pkt_t *cx_pkt; + aom_codec_err_t res; + aom_codec_iter_t iter; + const aom_codec_cx_pkt_t *cx_pkt; SvcInternal_t *const si = get_svc_internal(svc_ctx); if (svc_ctx == NULL || codec_ctx == NULL || si == NULL) { return VPX_CODEC_INVALID_PARAM; @@ -516,13 +516,13 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, svc_log_reset(svc_ctx); res = - vpx_codec_encode(codec_ctx, rawimg, pts, (uint32_t)duration, 0, deadline); + aom_codec_encode(codec_ctx, rawimg, 
pts, (uint32_t)duration, 0, deadline); if (res != VPX_CODEC_OK) { return res; } // save compressed data iter = NULL; - while ((cx_pkt = vpx_codec_get_cx_data(codec_ctx, &iter))) { + while ((cx_pkt = aom_codec_get_cx_data(codec_ctx, &iter))) { switch (cx_pkt->kind) { #if VPX_ENCODER_ABI_VERSION > (5 + VPX_CODEC_ABI_VERSION) #if CONFIG_SPATIAL_SVC @@ -568,7 +568,7 @@ vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, return VPX_CODEC_OK; } -const char *vpx_svc_get_message(const SvcContext *svc_ctx) { +const char *aom_svc_get_message(const SvcContext *svc_ctx) { const SvcInternal_t *const si = get_const_svc_internal(svc_ctx); if (svc_ctx == NULL || si == NULL) return NULL; return si->message_buffer; @@ -580,7 +580,7 @@ static double calc_psnr(double d) { } // dump accumulated statistics and reset accumulated values -const char *vpx_svc_dump_statistics(SvcContext *svc_ctx) { +const char *aom_svc_dump_statistics(SvcContext *svc_ctx) { int number_of_frames; int i, j; uint32_t bytes_total = 0; @@ -595,7 +595,7 @@ const char *vpx_svc_dump_statistics(SvcContext *svc_ctx) { svc_log_reset(svc_ctx); number_of_frames = si->psnr_pkt_received; - if (number_of_frames <= 0) return vpx_svc_get_message(svc_ctx); + if (number_of_frames <= 0) return aom_svc_get_message(svc_ctx); svc_log(svc_ctx, SVC_LOG_INFO, "\n"); for (i = 0; i < svc_ctx->spatial_layers; ++i) { @@ -635,10 +635,10 @@ const char *vpx_svc_dump_statistics(SvcContext *svc_ctx) { si->psnr_pkt_received = 0; svc_log(svc_ctx, SVC_LOG_INFO, "Total Bytes=[%u]\n", bytes_total); - return vpx_svc_get_message(svc_ctx); + return aom_svc_get_message(svc_ctx); } -void vpx_svc_release(SvcContext *svc_ctx) { +void aom_svc_release(SvcContext *svc_ctx) { SvcInternal_t *si; if (svc_ctx == NULL) return; // do not use get_svc_internal as it will unnecessarily allocate an diff --git a/aom/svc_context.h b/aom/svc_context.h index 7216749918108de3d9857634f60873de17847fae..4dcfe0ec7f88997d9dc1c04df986d365b02f83d1 100644 --- a/aom/svc_context.h +++ b/aom/svc_context.h @@ -18,7 +18,7 @@ #define VPX_SVC_CONTEXT_H_ #include "./vp8cx.h" -#include "./vpx_encoder.h" +#include "./aom_encoder.h" #ifdef __cplusplus extern "C" { @@ -42,7 +42,7 @@ typedef struct { int speed; // speed setting for codec int threads; int aqmode; // turns on aq-mode=3 (cyclic_refresh): 0=off, 1=on. - // private storage for vpx_svc_encode + // private storage for aom_svc_encode void *internal; } SvcContext; @@ -50,10 +50,10 @@ typedef struct { #define COMPONENTS 4 // psnr & sse statistics maintained for total, y, u, v typedef struct SvcInternal { - char options[OPTION_BUFFER_SIZE]; // set by vpx_svc_set_options + char options[OPTION_BUFFER_SIZE]; // set by aom_svc_set_options // values extracted from option, quantizers - vpx_svc_extra_cfg_t svc_params; + aom_svc_extra_cfg_t svc_params; int enable_auto_alt_ref[VPX_SS_MAX_LAYERS]; int bitrates[VPX_SS_MAX_LAYERS]; @@ -73,7 +73,7 @@ typedef struct SvcInternal { int use_multiple_frame_contexts; char message_buffer[2048]; - vpx_codec_ctx_t *codec_ctx; + aom_codec_ctx_t *codec_ctx; } SvcInternal_t; /** @@ -84,35 +84,35 @@ typedef struct SvcInternal { * scaling-factors=/,/,... * quantizers=,,... 
*/ -vpx_codec_err_t vpx_svc_set_options(SvcContext *svc_ctx, const char *options); +aom_codec_err_t aom_svc_set_options(SvcContext *svc_ctx, const char *options); /** * initialize SVC encoding */ -vpx_codec_err_t vpx_svc_init(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, - vpx_codec_iface_t *iface, - vpx_codec_enc_cfg_t *cfg); +aom_codec_err_t aom_svc_init(SvcContext *svc_ctx, aom_codec_ctx_t *codec_ctx, + aom_codec_iface_t *iface, + aom_codec_enc_cfg_t *cfg); /** * encode a frame of video with multiple layers */ -vpx_codec_err_t vpx_svc_encode(SvcContext *svc_ctx, vpx_codec_ctx_t *codec_ctx, - struct vpx_image *rawimg, vpx_codec_pts_t pts, +aom_codec_err_t aom_svc_encode(SvcContext *svc_ctx, aom_codec_ctx_t *codec_ctx, + struct aom_image *rawimg, aom_codec_pts_t pts, int64_t duration, int deadline); /** * finished with svc encoding, release allocated resources */ -void vpx_svc_release(SvcContext *svc_ctx); +void aom_svc_release(SvcContext *svc_ctx); /** * dump accumulated statistics and reset accumulated values */ -const char *vpx_svc_dump_statistics(SvcContext *svc_ctx); +const char *aom_svc_dump_statistics(SvcContext *svc_ctx); /** * get status message from previous encode */ -const char *vpx_svc_get_message(const SvcContext *svc_ctx); +const char *aom_svc_get_message(const SvcContext *svc_ctx); #ifdef __cplusplus } // extern "C" diff --git a/aom/vp8.h b/aom/vp8.h index 741def71478e2d54d56deede144a5a231b731abe..d9413f315a3d391c501bd55a1672f7510b64fd0e 100644 --- a/aom/vp8.h +++ b/aom/vp8.h @@ -11,7 +11,7 @@ /*!\defgroup vp8 VP8 * \ingroup codecs - * VP8 is vpx's newest video compression algorithm that uses motion + * VP8 is aom's newest video compression algorithm that uses motion * compensated prediction, Discrete Cosine Transform (DCT) coding of the * prediction error signal and context dependent entropy coding techniques * based on arithmetic principles. It features: @@ -31,8 +31,8 @@ #ifndef VPX_VP8_H_ #define VPX_VP8_H_ -#include "./vpx_codec.h" -#include "./vpx_image.h" +#include "./aom_codec.h" +#include "./aom_image.h" #ifdef __cplusplus extern "C" { @@ -98,20 +98,20 @@ typedef struct vp8_postproc_cfg { * * The set of macros define the type of VP8 reference frames */ -typedef enum vpx_ref_frame_type { +typedef enum aom_ref_frame_type { VP8_LAST_FRAME = 1, VP8_GOLD_FRAME = 2, VP8_ALTR_FRAME = 4 -} vpx_ref_frame_type_t; +} aom_ref_frame_type_t; /*!\brief reference frame data struct * * Define the data struct to access vp8 reference frames. 
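A rough usage sketch for the reference-frame struct defined just below, following common libvpx practice (pre-allocating ref.img at the coded frame size in I420 is an assumption here, not something this header states; frame_w and frame_h are placeholders for the stream dimensions):

    #include "aom/vp8.h"

    /* Copy the decoder's last reference frame into caller-owned storage. */
    static void copy_last_reference(aom_codec_ctx_t *decoder,
                                    unsigned int frame_w, unsigned int frame_h) {
      aom_ref_frame_t ref;
      ref.frame_type = VP8_LAST_FRAME;
      if (!aom_img_alloc(&ref.img, VPX_IMG_FMT_I420, frame_w, frame_h, 16))
        return; /* allocation failed */
      if (aom_codec_control(decoder, VP8_COPY_REFERENCE, &ref) == VPX_CODEC_OK) {
        /* ref.img now holds the reference frame */
      }
      aom_img_free(&ref.img);
    }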
*/ -typedef struct vpx_ref_frame { - vpx_ref_frame_type_t frame_type; /**< which reference frame */ - vpx_image_t img; /**< reference frame data in image format */ -} vpx_ref_frame_t; +typedef struct aom_ref_frame { + aom_ref_frame_type_t frame_type; /**< which reference frame */ + aom_image_t img; /**< reference frame data in image format */ +} aom_ref_frame_t; /*!\brief VP9 specific reference frame data struct * @@ -119,7 +119,7 @@ typedef struct vpx_ref_frame { */ typedef struct vp9_ref_frame { int idx; /**< frame index to get (input) */ - vpx_image_t img; /**< img structure to populate (output) */ + aom_image_t img; /**< img structure to populate (output) */ } vp9_ref_frame_t; /*!\cond */ @@ -127,9 +127,9 @@ typedef struct vp9_ref_frame { * * defines the data type for each of VP8 decoder control function requires */ -VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, vpx_ref_frame_t *) +VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, aom_ref_frame_t *) #define VPX_CTRL_VP8_SET_REFERENCE -VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, vpx_ref_frame_t *) +VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, aom_ref_frame_t *) #define VPX_CTRL_VP8_COPY_REFERENCE VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *) #define VPX_CTRL_VP8_SET_POSTPROC diff --git a/aom/vp8cx.h b/aom/vp8cx.h index 547f9fec6573165851e3be56c8e16117111ac197..882d084377547804541c32bde1737ae38d6acc0b 100644 --- a/aom/vp8cx.h +++ b/aom/vp8cx.h @@ -17,11 +17,11 @@ * @{ */ #include "./vp8.h" -#include "./vpx_encoder.h" +#include "./aom_encoder.h" /*!\file * \brief Provides definitions for using VP8 or VP9 encoder algorithm within the - * vpx Codec Interface. + * aom Codec Interface. */ #ifdef __cplusplus @@ -33,8 +33,8 @@ extern "C" { * This interface provides the capability to encode raw VP10 streams. * @{ */ -extern vpx_codec_iface_t vpx_codec_vp10_cx_algo; -extern vpx_codec_iface_t *vpx_codec_vp10_cx(void); +extern aom_codec_iface_t aom_codec_vp10_cx_algo; +extern aom_codec_iface_t *aom_codec_vp10_cx(void); /*!@} - end algorithm interface member group*/ /* @@ -112,7 +112,7 @@ extern vpx_codec_iface_t *vpx_codec_vp10_cx(void); * This set of macros define the control functions available for VPx * encoder interface. * - * \sa #vpx_codec_control + * \sa #aom_codec_control */ enum vp8e_enc_control_id { /*!\brief Codec control function to pass an ROI map to encoder. @@ -219,7 +219,7 @@ enum vp8e_enc_control_id { /*!\brief Codec control function to set constrained quality level. * - * \attention For this value to be used vpx_codec_enc_cfg_t::g_usage must be + * \attention For this value to be used aom_codec_enc_cfg_t::g_usage must be * set to #VPX_CQ. * \note Valid range: 0..63 * @@ -465,8 +465,8 @@ enum vp8e_enc_control_id { VP9E_SET_SVC_PARAMETERS, /*!\brief Codec control function to set svc layer for spatial and temporal. - * \note Valid ranges: 0..#vpx_codec_enc_cfg::ss_number_layers for spatial - * layer and 0..#vpx_codec_enc_cfg::ts_number_layers for + * \note Valid ranges: 0..#aom_codec_enc_cfg::ss_number_layers for spatial + * layer and 0..#aom_codec_enc_cfg::ts_number_layers for * temporal layer. * * Supported in codecs: VP9 @@ -558,7 +558,7 @@ enum vp8e_enc_control_id { /*!\brief Codec control function to set the frame flags and buffer indices * for spatial layers. The frame flags and buffer indices are set using the - * struct #vpx_svc_ref_frame_config defined below. + * struct #aom_svc_ref_frame_config defined below. 
* * Supported in codecs: VP9 */ @@ -573,11 +573,11 @@ enum vp8e_enc_control_id { VP9E_SET_RENDER_SIZE, }; -/*!\brief vpx 1-D scaling mode +/*!\brief aom 1-D scaling mode * - * This set of constants define 1-D vpx scaling modes + * This set of constants define 1-D aom scaling modes */ -typedef enum vpx_scaling_mode_1d { +typedef enum aom_scaling_mode_1d { VP8E_NORMAL = 0, VP8E_FOURFIVE = 1, VP8E_THREEFIVE = 2, @@ -611,13 +611,13 @@ typedef enum vp9e_temporal_layering_mode { VP9E_TEMPORAL_LAYERING_MODE_0212 = 3 } VP9E_TEMPORAL_LAYERING_MODE; -/*!\brief vpx region of interest map +/*!\brief aom region of interest map * * These defines the data structures for the region of interest map * */ -typedef struct vpx_roi_map { +typedef struct aom_roi_map { /*! An id between 0 and 3 for each 16x16 region within a frame. */ unsigned char *roi_map; unsigned int rows; /**< Number of rows. */ @@ -629,31 +629,31 @@ typedef struct vpx_roi_map { int delta_lf[4]; /**< Loop filter deltas. */ /*! Static breakout threshold for each segment. */ unsigned int static_threshold[4]; -} vpx_roi_map_t; +} aom_roi_map_t; -/*!\brief vpx active region map +/*!\brief aom active region map * * These defines the data structures for active region map * */ -typedef struct vpx_active_map { +typedef struct aom_active_map { unsigned char *active_map; /**< specify an on (1) or off (0) each 16x16 region within a frame */ unsigned int rows; /**< number of rows */ unsigned int cols; /**< number of cols */ -} vpx_active_map_t; +} aom_active_map_t; -/*!\brief vpx image scaling mode +/*!\brief aom image scaling mode * * This defines the data structure for image scaling mode * */ -typedef struct vpx_scaling_mode { +typedef struct aom_scaling_mode { VPX_SCALING_MODE h_scaling_mode; /**< horizontal scaling mode */ VPX_SCALING_MODE v_scaling_mode; /**< vertical scaling mode */ -} vpx_scaling_mode_t; +} aom_scaling_mode_t; /*!\brief VP8 token partition mode * @@ -674,14 +674,14 @@ typedef enum { VPX_CONTENT_DEFAULT, VPX_CONTENT_SCREEN, VPX_CONTENT_INVALID -} vpx_tune_content; +} aom_tune_content; /*!\brief VP8 model tuning parameters * * Changes the encoder to tune for certain types of input material. * */ -typedef enum { VPX_TUNE_PSNR, VPX_TUNE_SSIM } vpx_tune_metric; +typedef enum { VPX_TUNE_PSNR, VPX_TUNE_SSIM } aom_tune_metric; /*!\brief vp9 svc layer parameters * @@ -690,10 +690,10 @@ typedef enum { VPX_TUNE_PSNR, VPX_TUNE_SSIM } vpx_tune_metric; * temporal layer id for the current frame. * */ -typedef struct vpx_svc_layer_id { +typedef struct aom_svc_layer_id { int spatial_layer_id; /**< Spatial layer id number. */ int temporal_layer_id; /**< Temporal layer id number. */ -} vpx_svc_layer_id_t; +} aom_svc_layer_id_t; /*!\brief vp9 svc frame flag parameters. * @@ -703,12 +703,12 @@ typedef struct vpx_svc_layer_id { * flags and buffer indices for each spatial layer for the current (super)frame. * */ -typedef struct vpx_svc_ref_frame_config { +typedef struct aom_svc_ref_frame_config { int frame_flags[VPX_TS_MAX_LAYERS]; /**< Frame flags. */ int lst_fb_idx[VPX_TS_MAX_LAYERS]; /**< Last buffer index. */ int gld_fb_idx[VPX_TS_MAX_LAYERS]; /**< Golden buffer index. */ int alt_fb_idx[VPX_TS_MAX_LAYERS]; /**< Altref buffer index. 
*/ -} vpx_svc_ref_frame_config_t; +} aom_svc_ref_frame_config_t; /*!\cond */ /*!\brief VP8 encoder control function parameter type @@ -722,11 +722,11 @@ VPX_CTRL_USE_TYPE(VP8E_SET_FRAME_FLAGS, int) #define VPX_CTRL_VP8E_SET_FRAME_FLAGS VPX_CTRL_USE_TYPE(VP8E_SET_TEMPORAL_LAYER_ID, int) #define VPX_CTRL_VP8E_SET_TEMPORAL_LAYER_ID -VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, vpx_roi_map_t *) +VPX_CTRL_USE_TYPE(VP8E_SET_ROI_MAP, aom_roi_map_t *) #define VPX_CTRL_VP8E_SET_ROI_MAP -VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, vpx_active_map_t *) +VPX_CTRL_USE_TYPE(VP8E_SET_ACTIVEMAP, aom_active_map_t *) #define VPX_CTRL_VP8E_SET_ACTIVEMAP -VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, vpx_scaling_mode_t *) +VPX_CTRL_USE_TYPE(VP8E_SET_SCALEMODE, aom_scaling_mode_t *) #define VPX_CTRL_VP8E_SET_SCALEMODE VPX_CTRL_USE_TYPE(VP9E_SET_SVC, int) @@ -735,7 +735,7 @@ VPX_CTRL_USE_TYPE(VP9E_SET_SVC_PARAMETERS, void *) #define VPX_CTRL_VP9E_SET_SVC_PARAMETERS VPX_CTRL_USE_TYPE(VP9E_REGISTER_CX_CALLBACK, void *) #define VPX_CTRL_VP9E_REGISTER_CX_CALLBACK -VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID, vpx_svc_layer_id_t *) +VPX_CTRL_USE_TYPE(VP9E_SET_SVC_LAYER_ID, aom_svc_layer_id_t *) #define VPX_CTRL_VP9E_SET_SVC_LAYER_ID VPX_CTRL_USE_TYPE(VP8E_SET_CPUUSED, int) @@ -757,7 +757,7 @@ VPX_CTRL_USE_TYPE(VP8E_SET_ARNR_STRENGTH, unsigned int) #define VPX_CTRL_VP8E_SET_ARNR_STRENGTH VPX_CTRL_USE_TYPE_DEPRECATED(VP8E_SET_ARNR_TYPE, unsigned int) #define VPX_CTRL_VP8E_SET_ARNR_TYPE -VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* vpx_tune_metric */ +VPX_CTRL_USE_TYPE(VP8E_SET_TUNING, int) /* aom_tune_metric */ #define VPX_CTRL_VP8E_SET_TUNING VPX_CTRL_USE_TYPE(VP8E_SET_CQ_LEVEL, unsigned int) #define VPX_CTRL_VP8E_SET_CQ_LEVEL @@ -771,7 +771,7 @@ VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER, int *) #define VPX_CTRL_VP8E_GET_LAST_QUANTIZER VPX_CTRL_USE_TYPE(VP8E_GET_LAST_QUANTIZER_64, int *) #define VPX_CTRL_VP8E_GET_LAST_QUANTIZER_64 -VPX_CTRL_USE_TYPE(VP9E_GET_SVC_LAYER_ID, vpx_svc_layer_id_t *) +VPX_CTRL_USE_TYPE(VP9E_GET_SVC_LAYER_ID, aom_svc_layer_id_t *) #define VPX_CTRL_VP9E_GET_SVC_LAYER_ID VPX_CTRL_USE_TYPE(VP8E_SET_MAX_INTRA_BITRATE_PCT, unsigned int) @@ -811,7 +811,7 @@ VPX_CTRL_USE_TYPE(VP9E_SET_FRAME_PERIODIC_BOOST, unsigned int) VPX_CTRL_USE_TYPE(VP9E_SET_NOISE_SENSITIVITY, unsigned int) #define VPX_CTRL_VP9E_SET_NOISE_SENSITIVITY -VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* vpx_tune_content */ +VPX_CTRL_USE_TYPE(VP9E_SET_TUNE_CONTENT, int) /* aom_tune_content */ #define VPX_CTRL_VP9E_SET_TUNE_CONTENT VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_SPACE, int) @@ -823,13 +823,13 @@ VPX_CTRL_USE_TYPE(VP9E_SET_MIN_GF_INTERVAL, unsigned int) VPX_CTRL_USE_TYPE(VP9E_SET_MAX_GF_INTERVAL, unsigned int) #define VPX_CTRL_VP9E_SET_MAX_GF_INTERVAL -VPX_CTRL_USE_TYPE(VP9E_GET_ACTIVEMAP, vpx_active_map_t *) +VPX_CTRL_USE_TYPE(VP9E_GET_ACTIVEMAP, aom_active_map_t *) #define VPX_CTRL_VP9E_GET_ACTIVEMAP VPX_CTRL_USE_TYPE(VP9E_SET_COLOR_RANGE, int) #define VPX_CTRL_VP9E_SET_COLOR_RANGE -VPX_CTRL_USE_TYPE(VP9E_SET_SVC_REF_FRAME_CONFIG, vpx_svc_ref_frame_config_t *) +VPX_CTRL_USE_TYPE(VP9E_SET_SVC_REF_FRAME_CONFIG, aom_svc_ref_frame_config_t *) #define VPX_CTRL_VP9E_SET_SVC_REF_FRAME_CONFIG VPX_CTRL_USE_TYPE(VP9E_SET_RENDER_SIZE, int *) diff --git a/aom/vp8dx.h b/aom/vp8dx.h index fde11349eb3e5cedaf63df843bb1e46aac54a277..307d217a25975e177b89cd1aa24952725bf233ac 100644 --- a/aom/vp8dx.h +++ b/aom/vp8dx.h @@ -15,7 +15,7 @@ * @{ */ /*!\file - * \brief Provides definitions for using VP8 or VP9 within the vpx Decoder + * \brief Provides definitions for using VP8 or VP9 
within the aom Decoder * interface. */ #ifndef VPX_VP8DX_H_ @@ -33,8 +33,8 @@ extern "C" { * This interface provides the capability to decode VP10 streams. * @{ */ -extern vpx_codec_iface_t vpx_codec_vp10_dx_algo; -extern vpx_codec_iface_t *vpx_codec_vp10_dx(void); +extern aom_codec_iface_t aom_codec_vp10_dx_algo; +extern aom_codec_iface_t *aom_codec_vp10_dx(void); /*!@} - end algorithm interface member group*/ /*!\enum vp8_dec_control_id @@ -43,7 +43,7 @@ extern vpx_codec_iface_t *vpx_codec_vp10_dx(void); * This set of macros define the control functions available for the VP8 * decoder interface. * - * \sa #vpx_codec_control + * \sa #aom_codec_control */ enum vp8_dec_control_id { /** control function to get info on which reference frames were updated @@ -60,7 +60,7 @@ enum vp8_dec_control_id { VP8D_GET_LAST_REF_USED, /** decryption function to decrypt encoded buffer data immediately - * before decoding. Takes a vpx_decrypt_init, which contains + * before decoding. Takes a aom_decrypt_init, which contains * a callback function and opaque context pointer. */ VPXD_SET_DECRYPTOR, @@ -109,24 +109,24 @@ enum vp8_dec_control_id { /** Decrypt n bytes of data from input -> output, using the decrypt_state * passed in VPXD_SET_DECRYPTOR. */ -typedef void (*vpx_decrypt_cb)(void *decrypt_state, const unsigned char *input, +typedef void (*aom_decrypt_cb)(void *decrypt_state, const unsigned char *input, unsigned char *output, int count); /*!\brief Structure to hold decryption state * * Defines a structure to hold the decryption state and access function. */ -typedef struct vpx_decrypt_init { +typedef struct aom_decrypt_init { /*! Decrypt callback. */ - vpx_decrypt_cb decrypt_cb; + aom_decrypt_cb decrypt_cb; /*! Decryption state. */ void *decrypt_state; -} vpx_decrypt_init; +} aom_decrypt_init; -/*!\brief A deprecated alias for vpx_decrypt_init. +/*!\brief A deprecated alias for aom_decrypt_init. 
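The decrypt hook above is a plain callback plus an opaque pointer, registered through the VPXD_SET_DECRYPTOR control whose parameter type is declared a little further down. A minimal sketch, assuming aom_codec_control keeps the old vpx_codec_control shape, with a toy XOR cipher standing in for real decryption:

    #include "aom/vp8dx.h"

    /* Toy decrypt callback: XOR the encoded buffer with a per-stream key byte. */
    static void xor_decrypt(void *decrypt_state, const unsigned char *input,
                            unsigned char *output, int count) {
      const unsigned char key = *(const unsigned char *)decrypt_state;
      int i;
      for (i = 0; i < count; ++i) output[i] = input[i] ^ key;
    }

    static void install_decryptor(aom_codec_ctx_t *decoder, unsigned char *key) {
      aom_decrypt_init init;
      init.decrypt_cb = xor_decrypt;
      init.decrypt_state = key;  /* opaque context handed back to the callback */
      aom_codec_control(decoder, VPXD_SET_DECRYPTOR, &init);
    }

Per the control's documentation, the callback runs on each encoded buffer immediately before it is decoded, so the decoder itself never sees the encrypted bytes.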
*/ -typedef vpx_decrypt_init vp8_decrypt_init; +typedef aom_decrypt_init vp8_decrypt_init; /*!\cond */ /*!\brief VP8 decoder control function parameter type @@ -142,9 +142,9 @@ VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *) #define VPX_CTRL_VP8D_GET_FRAME_CORRUPTED VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *) #define VPX_CTRL_VP8D_GET_LAST_REF_USED -VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, vpx_decrypt_init *) +VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, aom_decrypt_init *) #define VPX_CTRL_VPXD_SET_DECRYPTOR -VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vpx_decrypt_init *) +VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, aom_decrypt_init *) #define VPX_CTRL_VP8D_SET_DECRYPTOR VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE, int *) #define VPX_CTRL_VP9D_GET_DISPLAY_SIZE diff --git a/aom_dsp/vpx_convolve.c b/aom_dsp/aom_convolve.c similarity index 90% rename from aom_dsp/vpx_convolve.c rename to aom_dsp/aom_convolve.c index 2f91df6c4e83422eb05fa2803bc3676fdc8f8ddb..233e8502073a6d46d5c27f84573063519bb004f7 100644 --- a/aom_dsp/vpx_convolve.c +++ b/aom_dsp/aom_convolve.c @@ -12,12 +12,12 @@ #include #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" -#include "aom_dsp/vpx_filter.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/aom_filter.h" #include "aom_ports/mem.h" static void convolve_horiz(const uint8_t *src, ptrdiff_t src_stride, @@ -154,7 +154,7 @@ static int get_filter_offset(const int16_t *f, const InterpKernel *base) { return (int)((const InterpKernel *)(intptr_t)f - base); } -void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -169,7 +169,7 @@ void vpx_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, w, h); } -void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -184,7 +184,7 @@ void vpx_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, x_step_q4, w, h); } -void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -199,7 +199,7 @@ void vpx_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, w, h); } -void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -214,7 +214,7 @@ void vpx_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, y_step_q4, w, h); } -void vpx_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { @@ -228,7 +228,7 @@ void vpx_convolve8_c(const uint8_t *src, 
ptrdiff_t src_stride, uint8_t *dst, filters_y, y0_q4, y_step_q4, w, h); } -void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { @@ -237,12 +237,12 @@ void vpx_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, assert(w <= 64); assert(h <= 64); - vpx_convolve8_c(src, src_stride, temp, 64, filter_x, x_step_q4, filter_y, + aom_convolve8_c(src, src_stride, temp, 64, filter_x, x_step_q4, filter_y, y_step_q4, w, h); - vpx_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); + aom_convolve_avg_c(temp, 64, dst, dst_stride, NULL, 0, NULL, 0, w, h); } -void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, const int16_t *filter_y, int filter_y_stride, int w, int h) { @@ -260,7 +260,7 @@ void vpx_convolve_copy_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, } } -void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, const int16_t *filter_y, int filter_y_stride, int w, int h) { @@ -279,53 +279,53 @@ void vpx_convolve_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, } } -void vpx_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_scaled_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_scaled_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_scaled_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_scaled_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_scaled_avg_vert_c(const 
uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } @@ -464,7 +464,7 @@ static void highbd_convolve(const uint8_t *src, ptrdiff_t src_stride, bd); } -void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -478,7 +478,7 @@ void vpx_highbd_convolve8_horiz_c(const uint8_t *src, ptrdiff_t src_stride, x_step_q4, w, h, bd); } -void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, @@ -492,7 +492,7 @@ void vpx_highbd_convolve8_avg_horiz_c(const uint8_t *src, ptrdiff_t src_stride, x_step_q4, w, h, bd); } -void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -506,7 +506,7 @@ void vpx_highbd_convolve8_vert_c(const uint8_t *src, ptrdiff_t src_stride, y_step_q4, w, h, bd); } -void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, @@ -520,7 +520,7 @@ void vpx_highbd_convolve8_avg_vert_c(const uint8_t *src, ptrdiff_t src_stride, y_step_q4, w, h, bd); } -void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -535,7 +535,7 @@ void vpx_highbd_convolve8_c(const uint8_t *src, ptrdiff_t src_stride, filters_y, y0_q4, y_step_q4, w, h, bd); } -void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, +void aom_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -545,13 +545,13 @@ void vpx_highbd_convolve8_avg_c(const uint8_t *src, ptrdiff_t src_stride, assert(w <= 64); assert(h <= 64); - vpx_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64, + aom_highbd_convolve8_c(src, src_stride, CONVERT_TO_BYTEPTR(temp), 64, filter_x, x_step_q4, filter_y, y_step_q4, w, h, bd); - 
vpx_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, NULL, + aom_highbd_convolve_avg_c(CONVERT_TO_BYTEPTR(temp), 64, dst, dst_stride, NULL, 0, NULL, 0, w, h, bd); } -void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride, +void aom_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, const int16_t *filter_y, int filter_y_stride, @@ -572,7 +572,7 @@ void vpx_highbd_convolve_copy_c(const uint8_t *src8, ptrdiff_t src_stride, } } -void vpx_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride, +void aom_highbd_convolve_avg_c(const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, const int16_t *filter_y, int filter_y_stride, diff --git a/aom_dsp/vpx_convolve.h b/aom_dsp/aom_convolve.h similarity index 96% rename from aom_dsp/vpx_convolve.h rename to aom_dsp/aom_convolve.h index b327ecc7ffaf4498d553d785e873bda2bd5f09f6..1fc4af4f6ff3c0ccde5d2cf3b35c86d93616f1de 100644 --- a/aom_dsp/vpx_convolve.h +++ b/aom_dsp/aom_convolve.h @@ -11,8 +11,8 @@ #ifndef VPX_DSP_VPX_CONVOLVE_H_ #define VPX_DSP_VPX_CONVOLVE_H_ -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #ifdef __cplusplus extern "C" { diff --git a/aom_dsp/vpx_dsp.mk b/aom_dsp/aom_dsp.mk similarity index 85% rename from aom_dsp/vpx_dsp.mk rename to aom_dsp/aom_dsp.mk index 8549d16e17d0e15c779fa35dfeb90371474596fc..fcc04c2a062bf7aec0f440962a7b40bea434aa66 100644 --- a/aom_dsp/vpx_dsp.mk +++ b/aom_dsp/aom_dsp.mk @@ -8,8 +8,8 @@ ## be found in the AUTHORS file in the root of the source tree. ## -DSP_SRCS-yes += vpx_dsp.mk -DSP_SRCS-yes += vpx_dsp_common.h +DSP_SRCS-yes += aom_dsp.mk +DSP_SRCS-yes += aom_dsp_common.h DSP_SRCS-$(HAVE_MSA) += mips/macros_msa.h @@ -42,7 +42,7 @@ ifeq ($(CONFIG_USE_X86INC),yes) DSP_SRCS-$(HAVE_SSE) += x86/intrapred_sse2.asm DSP_SRCS-$(HAVE_SSE2) += x86/intrapred_sse2.asm DSP_SRCS-$(HAVE_SSSE3) += x86/intrapred_ssse3.asm -DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm +DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm endif # CONFIG_USE_X86INC ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes) @@ -63,52 +63,52 @@ DSP_SRCS-$(HAVE_DSPR2) += mips/common_dspr2.h DSP_SRCS-$(HAVE_DSPR2) += mips/common_dspr2.c # interpolation filters -DSP_SRCS-yes += vpx_convolve.c -DSP_SRCS-yes += vpx_convolve.h -DSP_SRCS-yes += vpx_filter.h +DSP_SRCS-yes += aom_convolve.c +DSP_SRCS-yes += aom_convolve.h +DSP_SRCS-yes += aom_filter.h DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/convolve.h -DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/vpx_asm_stubs.c -DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_8t_sse2.asm -DSP_SRCS-$(HAVE_SSE2) += x86/vpx_subpixel_bilinear_sse2.asm -DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_ssse3.asm -DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_bilinear_ssse3.asm -DSP_SRCS-$(HAVE_AVX2) += x86/vpx_subpixel_8t_intrin_avx2.c -DSP_SRCS-$(HAVE_SSSE3) += x86/vpx_subpixel_8t_intrin_ssse3.c +DSP_SRCS-$(ARCH_X86)$(ARCH_X86_64) += x86/aom_asm_stubs.c +DSP_SRCS-$(HAVE_SSE2) += x86/aom_subpixel_8t_sse2.asm +DSP_SRCS-$(HAVE_SSE2) += x86/aom_subpixel_bilinear_sse2.asm +DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm +DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_bilinear_ssse3.asm +DSP_SRCS-$(HAVE_AVX2) += x86/aom_subpixel_8t_intrin_avx2.c +DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_intrin_ssse3.c ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes) -DSP_SRCS-$(HAVE_SSE2) += 
x86/vpx_high_subpixel_8t_sse2.asm -DSP_SRCS-$(HAVE_SSE2) += x86/vpx_high_subpixel_bilinear_sse2.asm +DSP_SRCS-$(HAVE_SSE2) += x86/aom_high_subpixel_8t_sse2.asm +DSP_SRCS-$(HAVE_SSE2) += x86/aom_high_subpixel_bilinear_sse2.asm endif ifeq ($(CONFIG_USE_X86INC),yes) -DSP_SRCS-$(HAVE_SSE2) += x86/vpx_convolve_copy_sse2.asm +DSP_SRCS-$(HAVE_SSE2) += x86/aom_convolve_copy_sse2.asm endif ifeq ($(HAVE_NEON_ASM),yes) -DSP_SRCS-yes += arm/vpx_convolve_copy_neon_asm$(ASM) -DSP_SRCS-yes += arm/vpx_convolve8_avg_neon_asm$(ASM) -DSP_SRCS-yes += arm/vpx_convolve8_neon_asm$(ASM) -DSP_SRCS-yes += arm/vpx_convolve_avg_neon_asm$(ASM) -DSP_SRCS-yes += arm/vpx_convolve_neon.c +DSP_SRCS-yes += arm/aom_convolve_copy_neon_asm$(ASM) +DSP_SRCS-yes += arm/aom_convolve8_avg_neon_asm$(ASM) +DSP_SRCS-yes += arm/aom_convolve8_neon_asm$(ASM) +DSP_SRCS-yes += arm/aom_convolve_avg_neon_asm$(ASM) +DSP_SRCS-yes += arm/aom_convolve_neon.c else ifeq ($(HAVE_NEON),yes) -DSP_SRCS-yes += arm/vpx_convolve_copy_neon.c -DSP_SRCS-yes += arm/vpx_convolve8_avg_neon.c -DSP_SRCS-yes += arm/vpx_convolve8_neon.c -DSP_SRCS-yes += arm/vpx_convolve_avg_neon.c -DSP_SRCS-yes += arm/vpx_convolve_neon.c +DSP_SRCS-yes += arm/aom_convolve_copy_neon.c +DSP_SRCS-yes += arm/aom_convolve8_avg_neon.c +DSP_SRCS-yes += arm/aom_convolve8_neon.c +DSP_SRCS-yes += arm/aom_convolve_avg_neon.c +DSP_SRCS-yes += arm/aom_convolve_neon.c endif # HAVE_NEON endif # HAVE_NEON_ASM # common (msa) -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_horiz_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_avg_vert_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_horiz_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve8_vert_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_avg_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_copy_msa.c -DSP_SRCS-$(HAVE_MSA) += mips/vpx_convolve_msa.h +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_horiz_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_avg_vert_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_horiz_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve8_vert_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_avg_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_copy_msa.c +DSP_SRCS-$(HAVE_MSA) += mips/aom_convolve_msa.h # common (dspr2) DSP_SRCS-$(HAVE_DSPR2) += mips/convolve_common_dspr2.h @@ -345,7 +345,7 @@ endif # CONFIG_ENCODERS DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes) -DSP_SRCS-yes += vpx_dsp_rtcd.c -DSP_SRCS-yes += vpx_dsp_rtcd_defs.pl +DSP_SRCS-yes += aom_dsp_rtcd.c +DSP_SRCS-yes += aom_dsp_rtcd_defs.pl -$(eval $(call rtcd_h_template,vpx_dsp_rtcd,aom_dsp/vpx_dsp_rtcd_defs.pl)) +$(eval $(call rtcd_h_template,aom_dsp_rtcd,aom_dsp/aom_dsp_rtcd_defs.pl)) diff --git a/aom_dsp/vpx_dsp_common.h b/aom_dsp/aom_dsp_common.h similarity index 95% rename from aom_dsp/vpx_dsp_common.h rename to aom_dsp/aom_dsp_common.h index 971b57fc44b7e82140614b9aca7502e25b210028..54a3c327d44c438e41d86bcc62ca7f6070edb05a 100644 --- a/aom_dsp/vpx_dsp_common.h +++ b/aom_dsp/aom_dsp_common.h @@ -12,9 +12,9 @@ #ifndef VPX_DSP_COMMON_H_ #define VPX_DSP_COMMON_H_ -#include "./vpx_config.h" -#include "aom/vpx_integer.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #ifdef __cplusplus diff --git a/aom_dsp/vpx_dsp_rtcd.c b/aom_dsp/aom_dsp_rtcd.c 
similarity index 79% rename from aom_dsp/vpx_dsp_rtcd.c rename to aom_dsp/aom_dsp_rtcd.c index 0d6140099c83f6094bf9e676c6b66c6249dd5348..11a57d3822e98a9d17b0d415d56b210ba04ffbbc 100644 --- a/aom_dsp/vpx_dsp_rtcd.c +++ b/aom_dsp/aom_dsp_rtcd.c @@ -8,9 +8,9 @@ * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_config.h" +#include "./aom_config.h" #define RTCD_C -#include "./vpx_dsp_rtcd.h" -#include "aom_ports/vpx_once.h" +#include "./aom_dsp_rtcd.h" +#include "aom_ports/aom_once.h" -void vpx_dsp_rtcd() { once(setup_rtcd_internal); } +void aom_dsp_rtcd() { once(setup_rtcd_internal); } diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl new file mode 100644 index 0000000000000000000000000000000000000000..2c504a9fd62881bbf53a1086258ed3dcfdf04159 --- /dev/null +++ b/aom_dsp/aom_dsp_rtcd_defs.pl @@ -0,0 +1,1919 @@ +sub aom_dsp_forward_decls() { +print < #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1, @@ -39,7 +39,7 @@ static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1, return qdst; } -void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, // unused @@ -219,7 +219,7 @@ void vpx_convolve8_avg_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, return; } -void vpx_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, // unused int x_step_q4, // unused diff --git a/aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm similarity index 94% rename from aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm rename to aom_dsp/arm/aom_convolve8_avg_neon_asm.asm index e279d570fc2acb1922edd61fa3132b6d4d7a83aa..fcf3d4ef84e353909b99c5d3f26f80f4095dddd3 100644 --- a/aom_dsp/arm/vpx_convolve8_avg_neon_asm.asm +++ b/aom_dsp/arm/aom_convolve8_avg_neon_asm.asm @@ -17,8 +17,8 @@ ; VP9_FILTER_WEIGHT == 128 ; VP9_FILTER_SHIFT == 7 - EXPORT |vpx_convolve8_avg_horiz_neon| - EXPORT |vpx_convolve8_avg_vert_neon| + EXPORT |aom_convolve8_avg_horiz_neon| + EXPORT |aom_convolve8_avg_vert_neon| ARM REQUIRE8 PRESERVE8 @@ -49,7 +49,7 @@ ; sp[]int w ; sp[]int h -|vpx_convolve8_avg_horiz_neon| PROC +|aom_convolve8_avg_horiz_neon| PROC push {r4-r10, lr} sub r0, r0, #3 ; adjust for taps @@ -72,7 +72,7 @@ mov r10, r6 ; w loop counter -vpx_convolve8_avg_loop_horiz_v +aom_convolve8_avg_loop_horiz_v vld1.8 {d24}, [r0], r1 vld1.8 {d25}, [r0], r1 vld1.8 {d26}, [r0], r1 @@ -95,7 +95,7 @@ vpx_convolve8_avg_loop_horiz_v add r0, r0, #3 -vpx_convolve8_avg_loop_horiz +aom_convolve8_avg_loop_horiz add r5, r0, #64 vld1.32 {d28[]}, [r0], r1 @@ -164,20 +164,20 @@ vpx_convolve8_avg_loop_horiz vmov q9, q13 subs r6, r6, #4 ; w -= 4 - bgt vpx_convolve8_avg_loop_horiz + bgt aom_convolve8_avg_loop_horiz ; outer loop mov r6, r10 ; restore w counter add r0, r0, r9 ; src += src_stride * 4 - w add r2, r2, r12 ; dst += dst_stride * 4 - w subs r7, r7, #4 ; h -= 4 - bgt vpx_convolve8_avg_loop_horiz_v + bgt 
aom_convolve8_avg_loop_horiz_v pop {r4-r10, pc} ENDP -|vpx_convolve8_avg_vert_neon| PROC +|aom_convolve8_avg_vert_neon| PROC push {r4-r8, lr} ; adjust for taps @@ -193,7 +193,7 @@ vpx_convolve8_avg_loop_horiz lsl r1, r1, #1 lsl r3, r3, #1 -vpx_convolve8_avg_loop_vert_h +aom_convolve8_avg_loop_vert_h mov r4, r0 add r7, r0, r1, asr #1 mov r5, r2 @@ -213,7 +213,7 @@ vpx_convolve8_avg_loop_vert_h vmovl.u8 q10, d20 vmovl.u8 q11, d22 -vpx_convolve8_avg_loop_vert +aom_convolve8_avg_loop_vert ; always process a 4x4 block at a time vld1.u32 {d24[0]}, [r7], r1 vld1.u32 {d26[0]}, [r4], r1 @@ -278,13 +278,13 @@ vpx_convolve8_avg_loop_vert vmov d22, d25 subs r12, r12, #4 ; h -= 4 - bgt vpx_convolve8_avg_loop_vert + bgt aom_convolve8_avg_loop_vert ; outer loop add r0, r0, #4 add r2, r2, #4 subs r6, r6, #4 ; w -= 4 - bgt vpx_convolve8_avg_loop_vert_h + bgt aom_convolve8_avg_loop_vert_h pop {r4-r8, pc} diff --git a/aom_dsp/arm/vpx_convolve8_neon.c b/aom_dsp/arm/aom_convolve8_neon.c similarity index 98% rename from aom_dsp/arm/vpx_convolve8_neon.c rename to aom_dsp/arm/aom_convolve8_neon.c index c712e06f7995c47efd54fd71c0c514e4dbba5d58..ed0df6dd2ae990b7499eb2ca048623d96d529b63 100644 --- a/aom_dsp/arm/vpx_convolve8_neon.c +++ b/aom_dsp/arm/aom_convolve8_neon.c @@ -12,9 +12,9 @@ #include #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1, @@ -39,7 +39,7 @@ static INLINE int32x4_t MULTIPLY_BY_Q0(int16x4_t dsrc0, int16x4_t dsrc1, return qdst; } -void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, // unused @@ -205,7 +205,7 @@ void vpx_convolve8_horiz_neon(const uint8_t *src, ptrdiff_t src_stride, return; } -void vpx_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_vert_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, // unused int x_step_q4, // unused diff --git a/aom_dsp/arm/vpx_convolve8_neon_asm.asm b/aom_dsp/arm/aom_convolve8_neon_asm.asm similarity index 94% rename from aom_dsp/arm/vpx_convolve8_neon_asm.asm rename to aom_dsp/arm/aom_convolve8_neon_asm.asm index 2d0f2ae0657299676e942f1f155eaaaa4be5ca35..315e5b5c81666e386e68a41bfd597c704494ba8b 100644 --- a/aom_dsp/arm/vpx_convolve8_neon_asm.asm +++ b/aom_dsp/arm/aom_convolve8_neon_asm.asm @@ -17,8 +17,8 @@ ; VP9_FILTER_WEIGHT == 128 ; VP9_FILTER_SHIFT == 7 - EXPORT |vpx_convolve8_horiz_neon| - EXPORT |vpx_convolve8_vert_neon| + EXPORT |aom_convolve8_horiz_neon| + EXPORT |aom_convolve8_vert_neon| ARM REQUIRE8 PRESERVE8 @@ -49,7 +49,7 @@ ; sp[]int w ; sp[]int h -|vpx_convolve8_horiz_neon| PROC +|aom_convolve8_horiz_neon| PROC push {r4-r10, lr} sub r0, r0, #3 ; adjust for taps @@ -72,7 +72,7 @@ mov r10, r6 ; w loop counter -vpx_convolve8_loop_horiz_v +aom_convolve8_loop_horiz_v vld1.8 {d24}, [r0], r1 vld1.8 {d25}, [r0], r1 vld1.8 {d26}, [r0], r1 @@ -95,7 +95,7 @@ vpx_convolve8_loop_horiz_v add r0, r0, #3 -vpx_convolve8_loop_horiz +aom_convolve8_loop_horiz add r5, r0, #64 vld1.32 {d28[]}, [r0], r1 @@ -153,20 +153,20 @@ vpx_convolve8_loop_horiz vmov q9, q13 subs r6, r6, #4 ; w -= 4 - bgt vpx_convolve8_loop_horiz + bgt aom_convolve8_loop_horiz ; outer 
loop mov r6, r10 ; restore w counter add r0, r0, r9 ; src += src_stride * 4 - w add r2, r2, r12 ; dst += dst_stride * 4 - w subs r7, r7, #4 ; h -= 4 - bgt vpx_convolve8_loop_horiz_v + bgt aom_convolve8_loop_horiz_v pop {r4-r10, pc} ENDP -|vpx_convolve8_vert_neon| PROC +|aom_convolve8_vert_neon| PROC push {r4-r8, lr} ; adjust for taps @@ -182,7 +182,7 @@ vpx_convolve8_loop_horiz lsl r1, r1, #1 lsl r3, r3, #1 -vpx_convolve8_loop_vert_h +aom_convolve8_loop_vert_h mov r4, r0 add r7, r0, r1, asr #1 mov r5, r2 @@ -202,7 +202,7 @@ vpx_convolve8_loop_vert_h vmovl.u8 q10, d20 vmovl.u8 q11, d22 -vpx_convolve8_loop_vert +aom_convolve8_loop_vert ; always process a 4x4 block at a time vld1.u32 {d24[0]}, [r7], r1 vld1.u32 {d26[0]}, [r4], r1 @@ -256,13 +256,13 @@ vpx_convolve8_loop_vert vmov d22, d25 subs r12, r12, #4 ; h -= 4 - bgt vpx_convolve8_loop_vert + bgt aom_convolve8_loop_vert ; outer loop add r0, r0, #4 add r2, r2, #4 subs r6, r6, #4 ; w -= 4 - bgt vpx_convolve8_loop_vert_h + bgt aom_convolve8_loop_vert_h pop {r4-r8, pc} diff --git a/aom_dsp/arm/vpx_convolve_avg_neon.c b/aom_dsp/arm/aom_convolve_avg_neon.c similarity index 97% rename from aom_dsp/arm/vpx_convolve_avg_neon.c rename to aom_dsp/arm/aom_convolve_avg_neon.c index 41df917daad5c7242d8b2db4c312f6e0c20ed854..f05d3ceae2e8f4a05da1def2ec4eab5be4c0125b 100644 --- a/aom_dsp/arm/vpx_convolve_avg_neon.c +++ b/aom_dsp/arm/aom_convolve_avg_neon.c @@ -11,10 +11,10 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" -void vpx_convolve_avg_neon(const uint8_t *src, // r0 +void aom_convolve_avg_neon(const uint8_t *src, // r0 ptrdiff_t src_stride, // r1 uint8_t *dst, // r2 ptrdiff_t dst_stride, // r3 diff --git a/aom_dsp/arm/vpx_convolve_avg_neon_asm.asm b/aom_dsp/arm/aom_convolve_avg_neon_asm.asm similarity index 98% rename from aom_dsp/arm/vpx_convolve_avg_neon_asm.asm rename to aom_dsp/arm/aom_convolve_avg_neon_asm.asm index 97e6189fda1632fcec4eeb3803575d6159c00f11..2177756e46f2521c509a14384d6be67d32eea758 100644 --- a/aom_dsp/arm/vpx_convolve_avg_neon_asm.asm +++ b/aom_dsp/arm/aom_convolve_avg_neon_asm.asm @@ -8,14 +8,14 @@ ; be found in the AUTHORS file in the root of the source tree. 
; - EXPORT |vpx_convolve_avg_neon| + EXPORT |aom_convolve_avg_neon| ARM REQUIRE8 PRESERVE8 AREA ||.text||, CODE, READONLY, ALIGN=2 -|vpx_convolve_avg_neon| PROC +|aom_convolve_avg_neon| PROC push {r4-r6, lr} ldrd r4, r5, [sp, #32] mov r6, r2 diff --git a/aom_dsp/arm/vpx_convolve_copy_neon.c b/aom_dsp/arm/aom_convolve_copy_neon.c similarity index 95% rename from aom_dsp/arm/vpx_convolve_copy_neon.c rename to aom_dsp/arm/aom_convolve_copy_neon.c index fcfb2654a8c9666a29fd095dfb411668bd37d8b3..9e57c7176f33b6c4076fb0dc8a4511c69cbc077b 100644 --- a/aom_dsp/arm/vpx_convolve_copy_neon.c +++ b/aom_dsp/arm/aom_convolve_copy_neon.c @@ -11,10 +11,10 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" -void vpx_convolve_copy_neon(const uint8_t *src, // r0 +void aom_convolve_copy_neon(const uint8_t *src, // r0 ptrdiff_t src_stride, // r1 uint8_t *dst, // r2 ptrdiff_t dst_stride, // r3 diff --git a/aom_dsp/arm/vpx_convolve_copy_neon_asm.asm b/aom_dsp/arm/aom_convolve_copy_neon_asm.asm similarity index 97% rename from aom_dsp/arm/vpx_convolve_copy_neon_asm.asm rename to aom_dsp/arm/aom_convolve_copy_neon_asm.asm index 89164ad48ba0b622e35c438ac6da64ab9963f946..2d60bee3cf5e4c4a9d1508d3e6e52305fb7741fa 100644 --- a/aom_dsp/arm/vpx_convolve_copy_neon_asm.asm +++ b/aom_dsp/arm/aom_convolve_copy_neon_asm.asm @@ -8,14 +8,14 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_convolve_copy_neon| + EXPORT |aom_convolve_copy_neon| ARM REQUIRE8 PRESERVE8 AREA ||.text||, CODE, READONLY, ALIGN=2 -|vpx_convolve_copy_neon| PROC +|aom_convolve_copy_neon| PROC push {r4-r5, lr} ldrd r4, r5, [sp, #28] diff --git a/aom_dsp/arm/vpx_convolve_neon.c b/aom_dsp/arm/aom_convolve_neon.c similarity index 84% rename from aom_dsp/arm/vpx_convolve_neon.c rename to aom_dsp/arm/aom_convolve_neon.c index 92443686e8ae19b87e2653c961ac8f31d0177a3c..6c2997e040465a6373c9d770a7d4559b1ebd7814 100644 --- a/aom_dsp/arm/vpx_convolve_neon.c +++ b/aom_dsp/arm/aom_convolve_neon.c @@ -11,11 +11,11 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" -void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { @@ -35,16 +35,16 @@ void vpx_convolve8_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, * the temp buffer which has lots of extra room and is subsequently discarded * this is safe if somewhat less than ideal. 
*/ - vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x, + aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x, x_step_q4, filter_y, y_step_q4, w, intermediate_height); /* Step into the temp buffer 3 lines to get the actual frame data */ - vpx_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x, + aom_convolve8_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -58,9 +58,9 @@ void vpx_convolve8_avg_neon(const uint8_t *src, ptrdiff_t src_stride, /* This implementation has the same issues as above. In addition, we only want * to average the values after both passes. */ - vpx_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x, + aom_convolve8_horiz_neon(src - src_stride * 3, src_stride, temp, 64, filter_x, x_step_q4, filter_y, y_step_q4, w, intermediate_height); - vpx_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x, + aom_convolve8_avg_vert_neon(temp + 64 * 3, 64, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } diff --git a/aom_dsp/arm/avg_neon.c b/aom_dsp/arm/avg_neon.c index 7e349c58a377714714a2b81d5e00e5f88ffc78f0..2f281051eba6ea5f50314f3895516a5e8776bbb6 100644 --- a/aom_dsp/arm/avg_neon.c +++ b/aom_dsp/arm/avg_neon.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" +#include "./aom_dsp_rtcd.h" +#include "./aom_config.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) { const uint32x4_t a = vpaddlq_u16(v_16x8); @@ -25,7 +25,7 @@ static INLINE unsigned int horizontal_add_u16x8(const uint16x8_t v_16x8) { return vget_lane_u32(c, 0); } -unsigned int vpx_avg_4x4_neon(const uint8_t *s, int p) { +unsigned int aom_avg_4x4_neon(const uint8_t *s, int p) { uint16x8_t v_sum; uint32x2_t v_s0 = vdup_n_u32(0); uint32x2_t v_s1 = vdup_n_u32(0); @@ -37,7 +37,7 @@ unsigned int vpx_avg_4x4_neon(const uint8_t *s, int p) { return (horizontal_add_u16x8(v_sum) + 8) >> 4; } -unsigned int vpx_avg_8x8_neon(const uint8_t *s, int p) { +unsigned int aom_avg_8x8_neon(const uint8_t *s, int p) { uint8x8_t v_s0 = vld1_u8(s); const uint8x8_t v_s1 = vld1_u8(s + p); uint16x8_t v_sum = vaddl_u8(v_s0, v_s1); @@ -65,7 +65,7 @@ unsigned int vpx_avg_8x8_neon(const uint8_t *s, int p) { // coeff: 16 bits, dynamic range [-32640, 32640]. // length: value range {16, 64, 256, 1024}. 
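The NEON SATD kernel that follows accumulates absolute transform coefficients. As a plain-C sketch of what it computes (a scalar equivalent written for illustration, not code from this patch), the ranges quoted just above also show why a 32-bit accumulator suffices: 1024 * 32640 is well under 2^31.

    #include <stdlib.h>
    #include "aom/aom_integer.h"

    /* Scalar reference for the SATD kernel below: sum of |coeff[i]|. */
    static int satd_ref_c(const int16_t *coeff, int length) {
      int i, satd = 0;
      for (i = 0; i < length; ++i) satd += abs(coeff[i]);
      return satd;
    }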
-int vpx_satd_neon(const int16_t *coeff, int length) { +int aom_satd_neon(const int16_t *coeff, int length) { const int16x4_t zero = vdup_n_s16(0); int32x4_t accum = vdupq_n_s32(0); @@ -90,7 +90,7 @@ int vpx_satd_neon(const int16_t *coeff, int length) { } } -void vpx_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref, +void aom_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref, const int ref_stride, const int height) { int i; uint16x8_t vec_sum_lo = vdupq_n_u16(0); @@ -143,7 +143,7 @@ void vpx_int_pro_row_neon(int16_t hbuf[16], uint8_t const *ref, vst1q_s16(hbuf, vreinterpretq_s16_u16(vec_sum_hi)); } -int16_t vpx_int_pro_col_neon(uint8_t const *ref, const int width) { +int16_t aom_int_pro_col_neon(uint8_t const *ref, const int width) { int i; uint16x8_t vec_sum = vdupq_n_u16(0); @@ -159,7 +159,7 @@ int16_t vpx_int_pro_col_neon(uint8_t const *ref, const int width) { // ref, src = [0, 510] - max diff = 16-bits // bwl = {2, 3, 4}, width = {16, 32, 64} -int vpx_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) { +int aom_vector_var_neon(int16_t const *ref, int16_t const *src, const int bwl) { int width = 4 << bwl; int32x4_t sse = vdupq_n_s32(0); int16x8_t total = vdupq_n_s16(0); diff --git a/aom_dsp/arm/bilinear_filter_media.asm b/aom_dsp/arm/bilinear_filter_media.asm index f3f9754c116f3cc46fbd739fe5be6ae4e6e99efc..fbbef2541c3b9be53b05834bd7f5400ceb76c231 100644 --- a/aom_dsp/arm/bilinear_filter_media.asm +++ b/aom_dsp/arm/bilinear_filter_media.asm @@ -9,8 +9,8 @@ ; - EXPORT |vpx_filter_block2d_bil_first_pass_media| - EXPORT |vpx_filter_block2d_bil_second_pass_media| + EXPORT |aom_filter_block2d_bil_first_pass_media| + EXPORT |aom_filter_block2d_bil_second_pass_media| AREA |.text|, CODE, READONLY ; name this block of code @@ -20,13 +20,13 @@ ; r2 unsigned int src_pitch, ; r3 unsigned int height, ; stack unsigned int width, -; stack const short *vpx_filter +; stack const short *aom_filter ;------------------------------------- ; The output is transposed stroed in output array to make it easy for second pass filtering. 
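The ARMv6 routine below implements the classic two-pass bilinear filter, and as the comment just above notes, the first pass stores its intermediate results transposed so the second pass can walk them linearly. A rough scalar sketch of that first pass follows; the 64 rounding constant, 7-bit shift, and uint16_t intermediate are assumptions based on the usual 128-weight bilinear filters rather than details taken from this patch.

    #include "aom/aom_integer.h"

    /* First pass of a two-pass bilinear filter: horizontal filtering with the
     * result stored transposed (column-major) for the second, vertical pass.
     * Filter taps are assumed to sum to 128 (7-bit precision). */
    static void bil_first_pass_sketch(const uint8_t *src, unsigned int src_pitch,
                                      uint16_t *dst /* transposed output */,
                                      unsigned int height, unsigned int width,
                                      const int16_t *filter) {
      unsigned int r, c;
      for (r = 0; r < height; ++r) {
        for (c = 0; c < width; ++c) {
          const int sum = src[c] * filter[0] + src[c + 1] * filter[1] + 64;
          dst[c * height + r] = (uint16_t)(sum >> 7);  /* store transposed */
        }
        src += src_pitch;
      }
    }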
-|vpx_filter_block2d_bil_first_pass_media| PROC +|aom_filter_block2d_bil_first_pass_media| PROC stmdb sp!, {r4 - r11, lr} - ldr r11, [sp, #40] ; vpx_filter address + ldr r11, [sp, #40] ; aom_filter address ldr r4, [sp, #36] ; width mov r12, r3 ; outer-loop counter @@ -134,7 +134,7 @@ ldmia sp!, {r4 - r11, pc} - ENDP ; |vpx_filter_block2d_bil_first_pass_media| + ENDP ; |aom_filter_block2d_bil_first_pass_media| ;--------------------------------- @@ -143,12 +143,12 @@ ; r2 int dst_pitch, ; r3 unsigned int height, ; stack unsigned int width, -; stack const short *vpx_filter +; stack const short *aom_filter ;--------------------------------- -|vpx_filter_block2d_bil_second_pass_media| PROC +|aom_filter_block2d_bil_second_pass_media| PROC stmdb sp!, {r4 - r11, lr} - ldr r11, [sp, #40] ; vpx_filter address + ldr r11, [sp, #40] ; aom_filter address ldr r4, [sp, #36] ; width ldr r5, [r11] ; load up filter coefficients @@ -232,6 +232,6 @@ bne bil_height_loop_null_2nd ldmia sp!, {r4 - r11, pc} - ENDP ; |vpx_filter_block2d_second_pass_media| + ENDP ; |aom_filter_block2d_second_pass_media| END diff --git a/aom_dsp/arm/fwd_txfm_neon.c b/aom_dsp/arm/fwd_txfm_neon.c index 7dbaf9f267aac534008a96a9b8970cb56119d8ec..17ce29e51f05b6b57ca11f420a336c7c203c3f2c 100644 --- a/aom_dsp/arm/fwd_txfm_neon.c +++ b/aom_dsp/arm/fwd_txfm_neon.c @@ -11,10 +11,10 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/txfm_common.h" -void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) { +void aom_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) { int i; // stage 1 int16x8_t input_0 = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2); @@ -171,7 +171,7 @@ void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) { } } // for { - // from vpx_dct_sse2.c + // from aom_dct_sse2.c // Post-condition (division by two) // division of two 16 bits signed numbers using shifts // n / 2 = (n - (n >> 15)) >> 1 @@ -203,7 +203,7 @@ void vpx_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) { } } -void vpx_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) { +void aom_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) { int r; int16x8_t sum = vld1q_s16(&input[0]); for (r = 1; r < 8; ++r) { diff --git a/aom_dsp/arm/idct16x16_1_add_neon.asm b/aom_dsp/arm/idct16x16_1_add_neon.asm index dc459e20d9cfac383ed21824fef6345fad734ffb..e07614f8394ed8e5c35dac1a30ec9f29217bb412 100644 --- a/aom_dsp/arm/idct16x16_1_add_neon.asm +++ b/aom_dsp/arm/idct16x16_1_add_neon.asm @@ -8,21 +8,21 @@ ; - EXPORT |vpx_idct16x16_1_add_neon| + EXPORT |aom_idct16x16_1_add_neon| ARM REQUIRE8 PRESERVE8 AREA ||.text||, CODE, READONLY, ALIGN=2 -;void vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, +;void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, ; int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride) -|vpx_idct16x16_1_add_neon| PROC +|aom_idct16x16_1_add_neon| PROC ldrsh r0, [r0] ; generate cospi_16_64 = 11585 @@ -193,6 +193,6 @@ vst1.64 {d31}, [r12], r2 bx lr - ENDP ; |vpx_idct16x16_1_add_neon| + ENDP ; |aom_idct16x16_1_add_neon| END diff --git a/aom_dsp/arm/idct16x16_1_add_neon.c b/aom_dsp/arm/idct16x16_1_add_neon.c index 207c3884558a7e4ad682487a0ded9ade57748232..29b7f44a1614d8ae01c3d755f90f13ecd88c4e6a 100644 --- a/aom_dsp/arm/idct16x16_1_add_neon.c +++ b/aom_dsp/arm/idct16x16_1_add_neon.c @@ -14,7 +14,7 @@ #include "aom_dsp/inv_txfm.h" #include "aom_ports/mem.h" -void 
vpx_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct16x16_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8x8_t d2u8, d3u8, d30u8, d31u8; uint64x1_t d2u64, d3u64, d4u64, d5u64; uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16; diff --git a/aom_dsp/arm/idct16x16_add_neon.asm b/aom_dsp/arm/idct16x16_add_neon.asm index 22a0c95941aa95215764df11979a8f286a375052..e22ab3e6d12b9f75bf67951405d3f33947a855b5 100644 --- a/aom_dsp/arm/idct16x16_add_neon.asm +++ b/aom_dsp/arm/idct16x16_add_neon.asm @@ -8,10 +8,10 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_idct16x16_256_add_neon_pass1| - EXPORT |vpx_idct16x16_256_add_neon_pass2| - EXPORT |vpx_idct16x16_10_add_neon_pass1| - EXPORT |vpx_idct16x16_10_add_neon_pass2| + EXPORT |aom_idct16x16_256_add_neon_pass1| + EXPORT |aom_idct16x16_256_add_neon_pass2| + EXPORT |aom_idct16x16_10_add_neon_pass1| + EXPORT |aom_idct16x16_10_add_neon_pass2| ARM REQUIRE8 PRESERVE8 @@ -36,7 +36,7 @@ MEND AREA Block, CODE, READONLY ; name this block of code -;void |vpx_idct16x16_256_add_neon_pass1|(int16_t *input, +;void |aom_idct16x16_256_add_neon_pass1|(int16_t *input, ; int16_t *output, int output_stride) ; ; r0 int16_t input @@ -46,7 +46,7 @@ ; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output ; will be stored back into q8-q15 registers. This function will touch q0-q7 ; registers and use them as buffer during calculation. -|vpx_idct16x16_256_add_neon_pass1| PROC +|aom_idct16x16_256_add_neon_pass1| PROC ; TODO(hkuang): Find a better way to load the elements. ; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15 @@ -273,9 +273,9 @@ vst1.64 {d31}, [r1], r2 bx lr - ENDP ; |vpx_idct16x16_256_add_neon_pass1| + ENDP ; |aom_idct16x16_256_add_neon_pass1| -;void vpx_idct16x16_256_add_neon_pass2(int16_t *src, +;void aom_idct16x16_256_add_neon_pass2(int16_t *src, ; int16_t *output, ; int16_t *pass1Output, ; int16_t skip_adding, @@ -292,7 +292,7 @@ ; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output ; will be stored back into q8-q15 registers. This function will touch q0-q7 ; registers and use them as buffer during calculation. -|vpx_idct16x16_256_add_neon_pass2| PROC +|aom_idct16x16_256_add_neon_pass2| PROC push {r3-r9} ; TODO(hkuang): Find a better way to load the elements. @@ -784,9 +784,9 @@ skip_adding_dest end_idct16x16_pass2 pop {r3-r9} bx lr - ENDP ; |vpx_idct16x16_256_add_neon_pass2| + ENDP ; |aom_idct16x16_256_add_neon_pass2| -;void |vpx_idct16x16_10_add_neon_pass1|(int16_t *input, +;void |aom_idct16x16_10_add_neon_pass1|(int16_t *input, ; int16_t *output, int output_stride) ; ; r0 int16_t input @@ -796,7 +796,7 @@ end_idct16x16_pass2 ; idct16 stage1 - stage6 on all the elements loaded in q8-q15. The output ; will be stored back into q8-q15 registers. This function will touch q0-q7 ; registers and use them as buffer during calculation. -|vpx_idct16x16_10_add_neon_pass1| PROC +|aom_idct16x16_10_add_neon_pass1| PROC ; TODO(hkuang): Find a better way to load the elements. 
; load elements of 0, 2, 4, 6, 8, 10, 12, 14 into q8 - q15 @@ -905,9 +905,9 @@ end_idct16x16_pass2 vst1.64 {d31}, [r1], r2 bx lr - ENDP ; |vpx_idct16x16_10_add_neon_pass1| + ENDP ; |aom_idct16x16_10_add_neon_pass1| -;void vpx_idct16x16_10_add_neon_pass2(int16_t *src, +;void aom_idct16x16_10_add_neon_pass2(int16_t *src, ; int16_t *output, ; int16_t *pass1Output, ; int16_t skip_adding, @@ -924,7 +924,7 @@ end_idct16x16_pass2 ; idct16 stage1 - stage7 on all the elements loaded in q8-q15. The output ; will be stored back into q8-q15 registers. This function will touch q0-q7 ; registers and use them as buffer during calculation. -|vpx_idct16x16_10_add_neon_pass2| PROC +|aom_idct16x16_10_add_neon_pass2| PROC push {r3-r9} ; TODO(hkuang): Find a better way to load the elements. @@ -1175,5 +1175,5 @@ end_idct16x16_pass2 end_idct10_16x16_pass2 pop {r3-r9} bx lr - ENDP ; |vpx_idct16x16_10_add_neon_pass2| + ENDP ; |aom_idct16x16_10_add_neon_pass2| END diff --git a/aom_dsp/arm/idct16x16_add_neon.c b/aom_dsp/arm/idct16x16_add_neon.c index c7e0d49c9c0e3d2f29cbafeae67a58f10e922212..3d545f878b63e38af09c643e43b0439400ce1df9 100644 --- a/aom_dsp/arm/idct16x16_add_neon.c +++ b/aom_dsp/arm/idct16x16_add_neon.c @@ -11,7 +11,7 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/txfm_common.h" static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16, @@ -78,7 +78,7 @@ static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16, return; } -void vpx_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out, +void aom_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out, int output_stride) { int16x4_t d0s16, d1s16, d2s16, d3s16; int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; @@ -314,7 +314,7 @@ void vpx_idct16x16_256_add_neon_pass1(int16_t *in, int16_t *out, return; } -void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out, +void aom_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out, int16_t *pass1Output, int16_t skip_adding, uint8_t *dest, int dest_stride) { uint8_t *d; @@ -863,7 +863,7 @@ void vpx_idct16x16_256_add_neon_pass2(int16_t *src, int16_t *out, return; } -void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out, +void aom_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out, int output_stride) { int16x4_t d4s16; int16x4_t d8s16, d9s16, d10s16, d11s16, d12s16, d13s16, d14s16, d15s16; @@ -999,7 +999,7 @@ void vpx_idct16x16_10_add_neon_pass1(int16_t *in, int16_t *out, return; } -void vpx_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out, +void aom_idct16x16_10_add_neon_pass2(int16_t *src, int16_t *out, int16_t *pass1Output, int16_t skip_adding, uint8_t *dest, int dest_stride) { int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16; diff --git a/aom_dsp/arm/idct16x16_neon.c b/aom_dsp/arm/idct16x16_neon.c index bf682cc5497a8f4c7cd2a4a09695af2f847b0289..db0d4905b53d1410390b6d864f76ab35ee013984 100644 --- a/aom_dsp/arm/idct16x16_neon.c +++ b/aom_dsp/arm/idct16x16_neon.c @@ -9,26 +9,26 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_dsp_common.h" -void vpx_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output, +void aom_idct16x16_256_add_neon_pass1(const int16_t *input, int16_t *output, int output_stride); -void vpx_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output, +void aom_idct16x16_256_add_neon_pass2(const int16_t *src, int16_t *output, int16_t *pass1Output, int16_t skip_adding, uint8_t *dest, int dest_stride); -void vpx_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output, +void aom_idct16x16_10_add_neon_pass1(const int16_t *input, int16_t *output, int output_stride); -void vpx_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output, +void aom_idct16x16_10_add_neon_pass2(const int16_t *src, int16_t *output, int16_t *pass1Output, int16_t skip_adding, uint8_t *dest, int dest_stride); #if HAVE_NEON_ASM /* For ARM NEON, d8-d15 are callee-saved registers, and need to be saved. */ -extern void vpx_push_neon(int64_t *store); -extern void vpx_pop_neon(int64_t *store); +extern void aom_push_neon(int64_t *store); +extern void aom_pop_neon(int64_t *store); #endif // HAVE_NEON_ASM -void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest, +void aom_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest, int dest_stride) { #if HAVE_NEON_ASM int64_t store_reg[8]; @@ -38,63 +38,63 @@ void vpx_idct16x16_256_add_neon(const int16_t *input, uint8_t *dest, #if HAVE_NEON_ASM // save d8-d15 register values. - vpx_push_neon(store_reg); + aom_push_neon(store_reg); #endif /* Parallel idct on the upper 8 rows */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(input, pass1_output, 8); + aom_idct16x16_256_add_neon_pass1(input, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7 // which will be saved into row_idct_output. - vpx_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0, + aom_idct16x16_256_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0, dest, dest_stride); /* Parallel idct on the lower 8 rows */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8); + aom_idct16x16_256_add_neon_pass1(input + 8 * 16, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7 // which will be saved into row_idct_output. - vpx_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8, + aom_idct16x16_256_add_neon_pass2(input + 8 * 16 + 1, row_idct_output + 8, pass1_output, 0, dest, dest_stride); /* Parallel idct on the left 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8); + aom_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. 
- vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output, + aom_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output, pass1_output, 1, dest, dest_stride); /* Parallel idct on the right 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8); + aom_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. - vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1, + aom_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1, row_idct_output + 8, pass1_output, 1, dest + 8, dest_stride); #if HAVE_NEON_ASM // restore d8-d15 register values. - vpx_pop_neon(store_reg); + aom_pop_neon(store_reg); #endif return; } -void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest, +void aom_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest, int dest_stride) { #if HAVE_NEON_ASM int64_t store_reg[8]; @@ -104,18 +104,18 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest, #if HAVE_NEON_ASM // save d8-d15 register values. - vpx_push_neon(store_reg); + aom_push_neon(store_reg); #endif /* Parallel idct on the upper 8 rows */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_10_add_neon_pass1(input, pass1_output, 8); + aom_idct16x16_10_add_neon_pass1(input, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7 // which will be saved into row_idct_output. - vpx_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0, + aom_idct16x16_10_add_neon_pass2(input + 1, row_idct_output, pass1_output, 0, dest, dest_stride); /* Skip Parallel idct on the lower 8 rows as they are all 0s */ @@ -123,29 +123,29 @@ void vpx_idct16x16_10_add_neon(const int16_t *input, uint8_t *dest, /* Parallel idct on the left 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8); + aom_idct16x16_256_add_neon_pass1(row_idct_output, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. - vpx_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output, + aom_idct16x16_256_add_neon_pass2(row_idct_output + 1, row_idct_output, pass1_output, 1, dest, dest_stride); /* Parallel idct on the right 8 columns */ // First pass processes even elements 0, 2, 4, 6, 8, 10, 12, 14 and save the // stage 6 result in pass1_output. - vpx_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8); + aom_idct16x16_256_add_neon_pass1(row_idct_output + 8 * 16, pass1_output, 8); // Second pass processes odd elements 1, 3, 5, 7, 9, 11, 13, 15 and combines // with result in pass1(pass1_output) to calculate final result in stage 7. // Then add the result to the destination data. 
- vpx_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1, + aom_idct16x16_256_add_neon_pass2(row_idct_output + 8 * 16 + 1, row_idct_output + 8, pass1_output, 1, dest + 8, dest_stride); #if HAVE_NEON_ASM // restore d8-d15 register values. - vpx_pop_neon(store_reg); + aom_pop_neon(store_reg); #endif return; diff --git a/aom_dsp/arm/idct32x32_1_add_neon.asm b/aom_dsp/arm/idct32x32_1_add_neon.asm index 96d276b4d1404a3753e63c12046be6b7bfdd1d98..9b31287b580f5ec7d7e1b045be362af1575b9d89 100644 --- a/aom_dsp/arm/idct32x32_1_add_neon.asm +++ b/aom_dsp/arm/idct32x32_1_add_neon.asm @@ -7,7 +7,7 @@ ; file in the root of the source tree. ; - EXPORT |vpx_idct32x32_1_add_neon| + EXPORT |aom_idct32x32_1_add_neon| ARM REQUIRE8 PRESERVE8 @@ -64,14 +64,14 @@ vst1.8 {q15},[$dst], $stride MEND -;void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, +;void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, ; int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride -|vpx_idct32x32_1_add_neon| PROC +|aom_idct32x32_1_add_neon| PROC push {lr} pld [r1] add r3, r1, #16 ; r3 dest + 16 for second loop @@ -140,5 +140,5 @@ diff_positive_32_32_loop bne diff_positive_32_32_loop pop {pc} - ENDP ; |vpx_idct32x32_1_add_neon| + ENDP ; |aom_idct32x32_1_add_neon| END diff --git a/aom_dsp/arm/idct32x32_1_add_neon.c b/aom_dsp/arm/idct32x32_1_add_neon.c index d9d5a0dde923f655b31e04081f86cf36aeffb375..768cf799acf09ff9ae05d095fc7a854fb803ce6c 100644 --- a/aom_dsp/arm/idct32x32_1_add_neon.c +++ b/aom_dsp/arm/idct32x32_1_add_neon.c @@ -11,7 +11,7 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/inv_txfm.h" #include "aom_ports/mem.h" @@ -94,7 +94,7 @@ static INLINE void ST_16x8(uint8_t *d, int d_stride, uint8x16_t *q8u8, return; } -void vpx_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct32x32_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8x16_t q0u8, q8u8, q9u8, q10u8, q11u8, q12u8, q13u8, q14u8, q15u8; int i, j, dest_stride8; uint8_t *d; diff --git a/aom_dsp/arm/idct32x32_add_neon.asm b/aom_dsp/arm/idct32x32_add_neon.asm index 7483ee77e18486f62fb2eb91f712612072361cf6..10de48298034d9d3118730418b1f25f4151441ae 100644 --- a/aom_dsp/arm/idct32x32_add_neon.asm +++ b/aom_dsp/arm/idct32x32_add_neon.asm @@ -43,7 +43,7 @@ cospi_30_64 EQU 1606 cospi_31_64 EQU 804 - EXPORT |vpx_idct32x32_1024_add_neon| + EXPORT |aom_idct32x32_1024_add_neon| ARM REQUIRE8 PRESERVE8 @@ -288,7 +288,7 @@ cospi_31_64 EQU 804 MEND ; -------------------------------------------------------------------------- -;void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int dest_stride); +;void aom_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int dest_stride); ; ; r0 int16_t *input, ; r1 uint8_t *dest, @@ -303,7 +303,7 @@ cospi_31_64 EQU 804 ; r9 dest + 15 * dest_stride, descending (14, 13, 12, ...) ; r10 dest + 16 * dest_stride, ascending (17, 18, 19, ...) -|vpx_idct32x32_1024_add_neon| PROC +|aom_idct32x32_1024_add_neon| PROC ; This function does one pass of idct32x32 transform. 
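The _1_add kernels handle the other extreme: a block whose only nonzero coefficient is the DC term. The scalar idea is small enough to show in full: scale input[0] by cospi_16_64 (11585) once per transform dimension, round it down to a single pixel offset, and add that offset to every pixel of the block. The sketch below mirrors the usual scalar reference for this kernel; treat the rounding constants as a description of intent rather than the shipped code.

#include <stdint.h>

static uint8_t clip_pixel(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

/* Scalar sketch of a DC-only inverse transform with add, 32x32 case. */
static void idct32x32_1_add_sketch(const int16_t *input, uint8_t *dest, int stride) {
  const int cospi_16_64 = 11585;
  int out = (input[0] * cospi_16_64 + (1 << 13)) >> 14;  /* dct_const_round_shift */
  out = (out * cospi_16_64 + (1 << 13)) >> 14;
  const int a1 = (out + 32) >> 6;  /* final rounding; smaller block sizes shift by 4 or 5 */
  for (int r = 0; r < 32; ++r)
    for (int c = 0; c < 32; ++c)
      dest[r * stride + c] = clip_pixel(dest[r * stride + c] + a1);
}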
; ; This is done by transposing the input and then doing a 1d transform on @@ -1295,5 +1295,5 @@ idct32_bands_end_2nd_pass vpop {d8-d15} pop {r4-r11} bx lr - ENDP ; |vpx_idct32x32_1024_add_neon| + ENDP ; |aom_idct32x32_1024_add_neon| END diff --git a/aom_dsp/arm/idct32x32_add_neon.c b/aom_dsp/arm/idct32x32_add_neon.c index 01d8169b5e56d3691ff064cc60f75656b35cecb3..a7562c7d5dfa945eac3e5ebdd6ac5d8ba311e2e9 100644 --- a/aom_dsp/arm/idct32x32_add_neon.c +++ b/aom_dsp/arm/idct32x32_add_neon.c @@ -11,7 +11,7 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/txfm_common.h" #define LOAD_FROM_TRANSPOSED(prev, first, second) \ @@ -428,7 +428,7 @@ static INLINE void idct32_bands_end_2nd_pass( return; } -void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) { +void aom_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) { int i, idct32_pass_loop; int16_t trans_buf[32 * 8]; int16_t pass1[32 * 32]; diff --git a/aom_dsp/arm/idct4x4_1_add_neon.asm b/aom_dsp/arm/idct4x4_1_add_neon.asm index adab715dde50079d96d9b165dfa4a66d088d92bf..145752774504f69738b51c09734b2f8ce6774452 100644 --- a/aom_dsp/arm/idct4x4_1_add_neon.asm +++ b/aom_dsp/arm/idct4x4_1_add_neon.asm @@ -8,21 +8,21 @@ ; - EXPORT |vpx_idct4x4_1_add_neon| + EXPORT |aom_idct4x4_1_add_neon| ARM REQUIRE8 PRESERVE8 AREA ||.text||, CODE, READONLY, ALIGN=2 -;void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, +;void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, ; int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride) -|vpx_idct4x4_1_add_neon| PROC +|aom_idct4x4_1_add_neon| PROC ldrsh r0, [r0] ; generate cospi_16_64 = 11585 @@ -63,6 +63,6 @@ vst1.32 {d7[1]}, [r12] bx lr - ENDP ; |vpx_idct4x4_1_add_neon| + ENDP ; |aom_idct4x4_1_add_neon| END diff --git a/aom_dsp/arm/idct4x4_1_add_neon.c b/aom_dsp/arm/idct4x4_1_add_neon.c index 64c4fe10a3abf838af1ead0fa30082d0e503a36c..d801eb60b066c4f29975a3ed6b54d84ebfebc8d4 100644 --- a/aom_dsp/arm/idct4x4_1_add_neon.c +++ b/aom_dsp/arm/idct4x4_1_add_neon.c @@ -14,7 +14,7 @@ #include "aom_dsp/inv_txfm.h" #include "aom_ports/mem.h" -void vpx_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct4x4_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8x8_t d6u8; uint32x2_t d2u32 = vdup_n_u32(0); uint16x8_t q8u16; diff --git a/aom_dsp/arm/idct4x4_add_neon.asm b/aom_dsp/arm/idct4x4_add_neon.asm index 877fbd634359538fd901e750cb480d4f5e0442ba..d240f335d4db75699f907102a4d663b6e595d742 100644 --- a/aom_dsp/arm/idct4x4_add_neon.asm +++ b/aom_dsp/arm/idct4x4_add_neon.asm @@ -8,7 +8,7 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_idct4x4_16_add_neon| + EXPORT |aom_idct4x4_16_add_neon| ARM REQUIRE8 PRESERVE8 @@ -16,13 +16,13 @@ AREA ||.text||, CODE, READONLY, ALIGN=2 AREA Block, CODE, READONLY ; name this block of code -;void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) +;void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride) -|vpx_idct4x4_16_add_neon| PROC +|aom_idct4x4_16_add_neon| PROC ; The 2D transform is done with two passes which are actually pretty ; similar. We first transform the rows. 
This is done by transposing @@ -185,6 +185,6 @@ vst1.32 {d26[1]}, [r1], r2 vst1.32 {d26[0]}, [r1] ; no post-increment bx lr - ENDP ; |vpx_idct4x4_16_add_neon| + ENDP ; |aom_idct4x4_16_add_neon| END diff --git a/aom_dsp/arm/idct4x4_add_neon.c b/aom_dsp/arm/idct4x4_add_neon.c index 165cd0f7deff7398880c4a747c81ae0dfcb4376b..397c61709ff576091b140f0f11f142e2e8baf6de 100644 --- a/aom_dsp/arm/idct4x4_add_neon.c +++ b/aom_dsp/arm/idct4x4_add_neon.c @@ -11,7 +11,7 @@ #include -void vpx_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct4x4_16_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8x8_t d26u8, d27u8; uint32x2_t d26u32, d27u32; uint16x8_t q8u16, q9u16; diff --git a/aom_dsp/arm/idct8x8_1_add_neon.asm b/aom_dsp/arm/idct8x8_1_add_neon.asm index dbbff364f372198bb1cb9443b552340d7bde97f6..d2b410de2deb01946cd9b8e69cc83fc9ee3391dd 100644 --- a/aom_dsp/arm/idct8x8_1_add_neon.asm +++ b/aom_dsp/arm/idct8x8_1_add_neon.asm @@ -8,21 +8,21 @@ ; - EXPORT |vpx_idct8x8_1_add_neon| + EXPORT |aom_idct8x8_1_add_neon| ARM REQUIRE8 PRESERVE8 AREA ||.text||, CODE, READONLY, ALIGN=2 -;void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, +;void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, ; int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride) -|vpx_idct8x8_1_add_neon| PROC +|aom_idct8x8_1_add_neon| PROC ldrsh r0, [r0] ; generate cospi_16_64 = 11585 @@ -83,6 +83,6 @@ vst1.64 {d31}, [r12], r2 bx lr - ENDP ; |vpx_idct8x8_1_add_neon| + ENDP ; |aom_idct8x8_1_add_neon| END diff --git a/aom_dsp/arm/idct8x8_1_add_neon.c b/aom_dsp/arm/idct8x8_1_add_neon.c index bb6fa740199cb2668d4e043cfa1e4daa3f7f169d..fcc2a2fcd29b547b979d3629b30b7ee27c493f96 100644 --- a/aom_dsp/arm/idct8x8_1_add_neon.c +++ b/aom_dsp/arm/idct8x8_1_add_neon.c @@ -14,7 +14,7 @@ #include "aom_dsp/inv_txfm.h" #include "aom_ports/mem.h" -void vpx_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct8x8_1_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8x8_t d2u8, d3u8, d30u8, d31u8; uint64x1_t d2u64, d3u64, d4u64, d5u64; uint16x8_t q0u16, q9u16, q10u16, q11u16, q12u16; diff --git a/aom_dsp/arm/idct8x8_add_neon.asm b/aom_dsp/arm/idct8x8_add_neon.asm index 6ab59b41b7459f89aa479eab8a45bb4746a87b06..a03c83d95879f359570631ea24b99786cd35f6bc 100644 --- a/aom_dsp/arm/idct8x8_add_neon.asm +++ b/aom_dsp/arm/idct8x8_add_neon.asm @@ -8,8 +8,8 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_idct8x8_64_add_neon| - EXPORT |vpx_idct8x8_12_add_neon| + EXPORT |aom_idct8x8_64_add_neon| + EXPORT |aom_idct8x8_12_add_neon| ARM REQUIRE8 PRESERVE8 @@ -198,13 +198,13 @@ MEND AREA Block, CODE, READONLY ; name this block of code -;void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) +;void aom_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride) -|vpx_idct8x8_64_add_neon| PROC +|aom_idct8x8_64_add_neon| PROC push {r4-r9} vpush {d8-d15} vld1.s16 {q8,q9}, [r0]! @@ -308,15 +308,15 @@ vpop {d8-d15} pop {r4-r9} bx lr - ENDP ; |vpx_idct8x8_64_add_neon| + ENDP ; |aom_idct8x8_64_add_neon| -;void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) +;void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) ; ; r0 int16_t input ; r1 uint8_t *dest ; r2 int dest_stride) -|vpx_idct8x8_12_add_neon| PROC +|aom_idct8x8_12_add_neon| PROC push {r4-r9} vpush {d8-d15} vld1.s16 {q8,q9}, [r0]! 
@@ -514,6 +514,6 @@ vpop {d8-d15} pop {r4-r9} bx lr - ENDP ; |vpx_idct8x8_12_add_neon| + ENDP ; |aom_idct8x8_12_add_neon| END diff --git a/aom_dsp/arm/idct8x8_add_neon.c b/aom_dsp/arm/idct8x8_add_neon.c index 44f1547c7ff6e64298a45f93686e5734645cf365..8e752105b6e59f8036822bb5a5835a133edd916c 100644 --- a/aom_dsp/arm/idct8x8_add_neon.c +++ b/aom_dsp/arm/idct8x8_add_neon.c @@ -11,7 +11,7 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/txfm_common.h" static INLINE void TRANSPOSE8X8(int16x8_t *q8s16, int16x8_t *q9s16, @@ -229,7 +229,7 @@ static INLINE void IDCT8x8_1D(int16x8_t *q8s16, int16x8_t *q9s16, return; } -void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8_t *d1, *d2; uint8x8_t d0u8, d1u8, d2u8, d3u8; uint64x1_t d0u64, d1u64, d2u64, d3u64; @@ -331,7 +331,7 @@ void vpx_idct8x8_64_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { return; } -void vpx_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { +void aom_idct8x8_12_add_neon(int16_t *input, uint8_t *dest, int dest_stride) { uint8_t *d1, *d2; uint8x8_t d0u8, d1u8, d2u8, d3u8; int16x4_t d10s16, d11s16, d12s16, d13s16, d16s16; diff --git a/aom_dsp/arm/intrapred_neon.c b/aom_dsp/arm/intrapred_neon.c index 476920452bbdff0c91d4223c073a220a959089cf..0cdba9a8bccd4c9c16cd11ef9d29ad453d9618e8 100644 --- a/aom_dsp/arm/intrapred_neon.c +++ b/aom_dsp/arm/intrapred_neon.c @@ -11,9 +11,9 @@ #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" //------------------------------------------------------------------------------ // DC 4x4 @@ -59,24 +59,24 @@ static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, } } -void vpx_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { dc_4x4(dst, stride, above, left, 1, 1); } -void vpx_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; dc_4x4(dst, stride, NULL, left, 0, 1); } -void vpx_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)left; dc_4x4(dst, stride, above, NULL, 1, 0); } -void vpx_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; (void)left; @@ -129,24 +129,24 @@ static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, } } -void vpx_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { dc_8x8(dst, stride, above, left, 1, 1); } -void vpx_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; dc_8x8(dst, stride, NULL, left, 0, 1); } -void vpx_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)left; 
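The dc_4x4/dc_8x8 helpers above take do_above/do_left flags so the four exported variants can share one body: the plain DC predictor averages both edges, the _left and _top variants average only one, and the _128 variant fills with the mid value. A generic scalar sketch of that pattern follows; the name and the generic block size are illustrative, since the NEON versions are specialised per size and vectorise the sums.

#include <stddef.h>
#include <stdint.h>

static void dc_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left,
                                int do_above, int do_left) {
  int sum = 0, count = 0, dc = 0x80;  /* 128 fallback when no edge is available */
  if (do_above) { for (int i = 0; i < bs; ++i) sum += above[i]; count += bs; }
  if (do_left)  { for (int i = 0; i < bs; ++i) sum += left[i];  count += bs; }
  if (count) dc = (sum + (count >> 1)) / count;  /* rounded average of the edges used */
  for (int r = 0; r < bs; ++r)
    for (int c = 0; c < bs; ++c) dst[r * stride + c] = (uint8_t)dc;
}

/* e.g. aom_dc_left_predictor_8x8_neon corresponds to
 * dc_predictor_sketch(dst, stride, 8, NULL, left, 0, 1). */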
dc_8x8(dst, stride, above, NULL, 1, 0); } -void vpx_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; (void)left; @@ -202,26 +202,26 @@ static INLINE void dc_16x16(uint8_t *dst, ptrdiff_t stride, } } -void vpx_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { dc_16x16(dst, stride, above, left, 1, 1); } -void vpx_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; dc_16x16(dst, stride, NULL, left, 0, 1); } -void vpx_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)left; dc_16x16(dst, stride, above, NULL, 1, 0); } -void vpx_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; @@ -285,26 +285,26 @@ static INLINE void dc_32x32(uint8_t *dst, ptrdiff_t stride, } } -void vpx_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { dc_32x32(dst, stride, above, left, 1, 1); } -void vpx_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; dc_32x32(dst, stride, NULL, left, 0, 1); } -void vpx_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)left; dc_32x32(dst, stride, above, NULL, 1, 0); } -void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { (void)above; @@ -314,7 +314,7 @@ void vpx_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, // ----------------------------------------------------------------------------- -void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const uint64x1_t A0 = vreinterpret_u64_u8(vld1_u8(above)); // top row const uint64x1_t A1 = vshr_n_u64(A0, 8); @@ -337,7 +337,7 @@ void vpx_d45_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, dst[3 * stride + 3] = above[7]; } -void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { static const uint8_t shuffle1[8] = { 1, 2, 3, 4, 5, 6, 7, 7 }; static const uint8_t shuffle2[8] = { 2, 3, 4, 5, 6, 7, 7, 7 }; @@ -357,7 +357,7 @@ void vpx_d45_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, vst1_u8(dst + i * stride, row); } -void vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const uint8x16_t A0 = vld1q_u8(above); // top row const uint8x16_t above_right = vld1q_dup_u8(above + 15); @@ -376,7 +376,7 @@ void 
vpx_d45_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, // ----------------------------------------------------------------------------- -void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const uint8x8_t XABCD_u8 = vld1_u8(above - 1); const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8); @@ -406,7 +406,7 @@ void vpx_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, #if !HAVE_NEON_ASM -void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint32x2_t d0u32 = vdup_n_u32(0); @@ -417,7 +417,7 @@ void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, vst1_lane_u32((uint32_t *)dst, d0u32, 0); } -void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint8x8_t d0u8 = vdup_n_u8(0); @@ -427,7 +427,7 @@ void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8); } -void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint8x16_t q0u8 = vdupq_n_u8(0); @@ -437,7 +437,7 @@ void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8); } -void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint8x16_t q0u8 = vdupq_n_u8(0); @@ -452,7 +452,7 @@ void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, } } -void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { uint8x8_t d0u8 = vdup_n_u8(0); uint32x2_t d1u32 = vdup_n_u32(0); @@ -473,7 +473,7 @@ void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0); } -void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { uint8x8_t d0u8 = vdup_n_u8(0); uint64x1_t d1u64 = vdup_n_u64(0); @@ -506,7 +506,7 @@ void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, vst1_u8(dst, d0u8); } -void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int j; uint8x8_t d2u8 = vdup_n_u8(0); @@ -544,7 +544,7 @@ void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, } } -void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int j, k; uint8x8_t d2u8 = vdup_n_u8(0); @@ -592,7 +592,7 @@ void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, } } -void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, +void aom_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int i; uint16x8_t q1u16, q3u16; @@ -612,7 +612,7 @@ void vpx_tm_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride, } } -void vpx_tm_predictor_8x8_neon(uint8_t *dst, 
ptrdiff_t stride, +void aom_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int j; uint16x8_t q0u16, q3u16, q10u16; @@ -654,7 +654,7 @@ void vpx_tm_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride, } } -void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, +void aom_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int j, k; uint16x8_t q0u16, q2u16, q3u16, q8u16, q10u16; @@ -717,7 +717,7 @@ void vpx_tm_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride, } } -void vpx_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, +void aom_tm_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int j, k; uint16x8_t q0u16, q3u16, q8u16, q9u16, q10u16, q11u16; diff --git a/aom_dsp/arm/intrapred_neon_asm.asm b/aom_dsp/arm/intrapred_neon_asm.asm index 115790d4801adfd5703a67ebd1878834a81b9513..6014a09f241dc73b6a1cf69ee3489253cf177502 100644 --- a/aom_dsp/arm/intrapred_neon_asm.asm +++ b/aom_dsp/arm/intrapred_neon_asm.asm @@ -8,25 +8,25 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_v_predictor_4x4_neon| - EXPORT |vpx_v_predictor_8x8_neon| - EXPORT |vpx_v_predictor_16x16_neon| - EXPORT |vpx_v_predictor_32x32_neon| - EXPORT |vpx_h_predictor_4x4_neon| - EXPORT |vpx_h_predictor_8x8_neon| - EXPORT |vpx_h_predictor_16x16_neon| - EXPORT |vpx_h_predictor_32x32_neon| - EXPORT |vpx_tm_predictor_4x4_neon| - EXPORT |vpx_tm_predictor_8x8_neon| - EXPORT |vpx_tm_predictor_16x16_neon| - EXPORT |vpx_tm_predictor_32x32_neon| + EXPORT |aom_v_predictor_4x4_neon| + EXPORT |aom_v_predictor_8x8_neon| + EXPORT |aom_v_predictor_16x16_neon| + EXPORT |aom_v_predictor_32x32_neon| + EXPORT |aom_h_predictor_4x4_neon| + EXPORT |aom_h_predictor_8x8_neon| + EXPORT |aom_h_predictor_16x16_neon| + EXPORT |aom_h_predictor_32x32_neon| + EXPORT |aom_tm_predictor_4x4_neon| + EXPORT |aom_tm_predictor_8x8_neon| + EXPORT |aom_tm_predictor_16x16_neon| + EXPORT |aom_tm_predictor_32x32_neon| ARM REQUIRE8 PRESERVE8 AREA ||.text||, CODE, READONLY, ALIGN=2 -;void vpx_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -34,16 +34,16 @@ ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_v_predictor_4x4_neon| PROC +|aom_v_predictor_4x4_neon| PROC vld1.32 {d0[0]}, [r2] vst1.32 {d0[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 vst1.32 {d0[0]}, [r0], r1 bx lr - ENDP ; |vpx_v_predictor_4x4_neon| + ENDP ; |aom_v_predictor_4x4_neon| -;void vpx_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -51,7 +51,7 @@ ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_v_predictor_8x8_neon| PROC +|aom_v_predictor_8x8_neon| PROC vld1.8 {d0}, [r2] vst1.8 {d0}, [r0], r1 vst1.8 {d0}, [r0], r1 @@ -62,9 +62,9 @@ vst1.8 {d0}, [r0], r1 vst1.8 {d0}, [r0], r1 bx lr - ENDP ; |vpx_v_predictor_8x8_neon| + ENDP ; |aom_v_predictor_8x8_neon| -;void vpx_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -72,7 +72,7 @@ ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_v_predictor_16x16_neon| PROC +|aom_v_predictor_16x16_neon| PROC vld1.8 {q0}, [r2] 
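The remaining predictor families renamed in these hunks are simple enough to state as scalar code, which may help when reading the specialised assembly below: V repeats the row above, H repeats each left-edge pixel across its row, and TM ("true motion") extrapolates with left + above - top_left, clamped to pixel range. The sketch is generic over the block size and uses hypothetical names; the NEON paths unroll each size.

#include <stddef.h>
#include <stdint.h>

static uint8_t clip_pixel(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

static void v_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *above) {
  for (int r = 0; r < bs; ++r)
    for (int c = 0; c < bs; ++c) dst[r * stride + c] = above[c];
}

static void h_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bs, const uint8_t *left) {
  for (int r = 0; r < bs; ++r)
    for (int c = 0; c < bs; ++c) dst[r * stride + c] = left[r];
}

static void tm_predictor_sketch(uint8_t *dst, ptrdiff_t stride, int bs,
                                const uint8_t *above, const uint8_t *left) {
  const int top_left = above[-1];  /* ytop_left, loaded from above[-1] as in the asm */
  for (int r = 0; r < bs; ++r)
    for (int c = 0; c < bs; ++c)
      dst[r * stride + c] = clip_pixel(left[r] + above[c] - top_left);
}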
vst1.8 {q0}, [r0], r1 vst1.8 {q0}, [r0], r1 @@ -91,9 +91,9 @@ vst1.8 {q0}, [r0], r1 vst1.8 {q0}, [r0], r1 bx lr - ENDP ; |vpx_v_predictor_16x16_neon| + ENDP ; |aom_v_predictor_16x16_neon| -;void vpx_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -101,7 +101,7 @@ ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_v_predictor_32x32_neon| PROC +|aom_v_predictor_32x32_neon| PROC vld1.8 {q0, q1}, [r2] mov r2, #2 loop_v @@ -124,9 +124,9 @@ loop_v subs r2, r2, #1 bgt loop_v bx lr - ENDP ; |vpx_v_predictor_32x32_neon| + ENDP ; |aom_v_predictor_32x32_neon| -;void vpx_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -134,7 +134,7 @@ loop_v ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_h_predictor_4x4_neon| PROC +|aom_h_predictor_4x4_neon| PROC vld1.32 {d1[0]}, [r3] vdup.8 d0, d1[0] vst1.32 {d0[0]}, [r0], r1 @@ -145,9 +145,9 @@ loop_v vdup.8 d0, d1[3] vst1.32 {d0[0]}, [r0], r1 bx lr - ENDP ; |vpx_h_predictor_4x4_neon| + ENDP ; |aom_h_predictor_4x4_neon| -;void vpx_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -155,7 +155,7 @@ loop_v ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_h_predictor_8x8_neon| PROC +|aom_h_predictor_8x8_neon| PROC vld1.64 {d1}, [r3] vdup.8 d0, d1[0] vst1.64 {d0}, [r0], r1 @@ -174,9 +174,9 @@ loop_v vdup.8 d0, d1[7] vst1.64 {d0}, [r0], r1 bx lr - ENDP ; |vpx_h_predictor_8x8_neon| + ENDP ; |aom_h_predictor_8x8_neon| -;void vpx_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -184,7 +184,7 @@ loop_v ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_h_predictor_16x16_neon| PROC +|aom_h_predictor_16x16_neon| PROC vld1.8 {q1}, [r3] vdup.8 q0, d2[0] vst1.8 {q0}, [r0], r1 @@ -219,9 +219,9 @@ loop_v vdup.8 q0, d3[7] vst1.8 {q0}, [r0], r1 bx lr - ENDP ; |vpx_h_predictor_16x16_neon| + ENDP ; |aom_h_predictor_16x16_neon| -;void vpx_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, +;void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -229,7 +229,7 @@ loop_v ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_h_predictor_32x32_neon| PROC +|aom_h_predictor_32x32_neon| PROC sub r1, r1, #16 mov r2, #2 loop_h @@ -285,9 +285,9 @@ loop_h subs r2, r2, #1 bgt loop_h bx lr - ENDP ; |vpx_h_predictor_32x32_neon| + ENDP ; |aom_h_predictor_32x32_neon| -;void vpx_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride, +;void aom_tm_predictor_4x4_neon (uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -295,7 +295,7 @@ loop_h ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_tm_predictor_4x4_neon| PROC +|aom_tm_predictor_4x4_neon| PROC ; Load ytop_left = above[-1]; sub r12, r2, #1 vld1.u8 {d0[]}, [r12] @@ -331,9 +331,9 @@ loop_h vst1.32 {d0[0]}, [r0], r1 vst1.32 {d1[0]}, [r0], r1 bx lr - ENDP ; |vpx_tm_predictor_4x4_neon| + ENDP ; |aom_tm_predictor_4x4_neon| -;void vpx_tm_predictor_8x8_neon (uint8_t *dst, ptrdiff_t y_stride, +;void aom_tm_predictor_8x8_neon 
(uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -341,7 +341,7 @@ loop_h ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_tm_predictor_8x8_neon| PROC +|aom_tm_predictor_8x8_neon| PROC ; Load ytop_left = above[-1]; sub r12, r2, #1 vld1.8 {d0[]}, [r12] @@ -403,9 +403,9 @@ loop_h vst1.64 {d3}, [r0], r1 bx lr - ENDP ; |vpx_tm_predictor_8x8_neon| + ENDP ; |aom_tm_predictor_8x8_neon| -;void vpx_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride, +;void aom_tm_predictor_16x16_neon (uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -413,7 +413,7 @@ loop_h ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_tm_predictor_16x16_neon| PROC +|aom_tm_predictor_16x16_neon| PROC ; Load ytop_left = above[-1]; sub r12, r2, #1 vld1.8 {d0[]}, [r12] @@ -496,9 +496,9 @@ loop_16x16_neon bgt loop_16x16_neon bx lr - ENDP ; |vpx_tm_predictor_16x16_neon| + ENDP ; |aom_tm_predictor_16x16_neon| -;void vpx_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride, +;void aom_tm_predictor_32x32_neon (uint8_t *dst, ptrdiff_t y_stride, ; const uint8_t *above, ; const uint8_t *left) ; r0 uint8_t *dst @@ -506,7 +506,7 @@ loop_16x16_neon ; r2 const uint8_t *above ; r3 const uint8_t *left -|vpx_tm_predictor_32x32_neon| PROC +|aom_tm_predictor_32x32_neon| PROC ; Load ytop_left = above[-1]; sub r12, r2, #1 vld1.8 {d0[]}, [r12] @@ -625,6 +625,6 @@ loop_32x32_neon bgt loop_32x32_neon bx lr - ENDP ; |vpx_tm_predictor_32x32_neon| + ENDP ; |aom_tm_predictor_32x32_neon| END diff --git a/aom_dsp/arm/loopfilter_16_neon.asm b/aom_dsp/arm/loopfilter_16_neon.asm index 5a8fdd6aff7b20d301f7b0fcded6401d7f64596a..1f2fc41fcb413196e76c19adc51c48aef1b559d3 100644 --- a/aom_dsp/arm/loopfilter_16_neon.asm +++ b/aom_dsp/arm/loopfilter_16_neon.asm @@ -8,12 +8,12 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_lpf_horizontal_4_dual_neon| + EXPORT |aom_lpf_horizontal_4_dual_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 -;void vpx_lpf_horizontal_4_dual_neon(uint8_t *s, int p, +;void aom_lpf_horizontal_4_dual_neon(uint8_t *s, int p, ; const uint8_t *blimit0, ; const uint8_t *limit0, ; const uint8_t *thresh0, @@ -29,7 +29,7 @@ ; sp+8 const uint8_t *limit1, ; sp+12 const uint8_t *thresh1, -|vpx_lpf_horizontal_4_dual_neon| PROC +|aom_lpf_horizontal_4_dual_neon| PROC push {lr} ldr r12, [sp, #4] ; load thresh0 @@ -66,7 +66,7 @@ sub r2, r2, r1, lsl #1 sub r3, r3, r1, lsl #1 - bl vpx_loop_filter_neon_16 + bl aom_loop_filter_neon_16 vst1.u8 {q5}, [r2@64], r1 ; store op1 vst1.u8 {q6}, [r3@64], r1 ; store op0 @@ -76,9 +76,9 @@ vpop {d8-d15} ; restore neon registers pop {pc} - ENDP ; |vpx_lpf_horizontal_4_dual_neon| + ENDP ; |aom_lpf_horizontal_4_dual_neon| -; void vpx_loop_filter_neon_16(); +; void aom_loop_filter_neon_16(); ; This is a helper function for the loopfilters. The invidual functions do the ; necessary load, transpose (if necessary) and store. This function uses ; registers d8-d15, so the calling function must save those registers. 
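Before the PROC body below, it may help to see the per-lane arithmetic the helper vectorises across its 16 pixels: build a mask from the local pixel steps against limit and blimit, check the high-edge-variance threshold, then apply the narrow 4-tap adjustment in the signed (value ^ 0x80) domain. The following scalar sketch follows the familiar reference form of this filter, with hypothetical names and value parameters instead of the pointer arguments the exported functions take; read it as intent, not the shipped code.

#include <stdint.h>
#include <stdlib.h>

static int8_t clamp8(int v) { return (int8_t)(v < -128 ? -128 : v > 127 ? 127 : v); }

static void filter4_sketch(uint8_t *op1, uint8_t *op0, uint8_t *oq0, uint8_t *oq1,
                           uint8_t blimit, uint8_t limit, uint8_t thresh) {
  const int p1 = *op1, p0 = *op0, q0 = *oq0, q1 = *oq1;
  /* filter_mask: the edge is left untouched if local steps exceed the limits.
   * The full mask also checks abs(p3-p2), abs(p2-p1), abs(q2-q1) and
   * abs(q3-q2) against limit; omitted here since only p1..q1 are modified. */
  const int mask = (abs(p1 - p0) <= limit) && (abs(q1 - q0) <= limit) &&
                   (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit);
  /* hev: high edge variance keeps the outer taps from being smoothed */
  const int hev = (abs(p1 - p0) > thresh) || (abs(q1 - q0) > thresh);
  if (!mask) return;

  const int8_t ps1 = (int8_t)(p1 ^ 0x80), ps0 = (int8_t)(p0 ^ 0x80);
  const int8_t qs0 = (int8_t)(q0 ^ 0x80), qs1 = (int8_t)(q1 ^ 0x80);
  const int8_t filter = clamp8((hev ? clamp8(ps1 - qs1) : 0) + 3 * (qs0 - ps0));
  const int8_t filter1 = clamp8(filter + 4) >> 3;
  const int8_t filter2 = clamp8(filter + 3) >> 3;
  *oq0 = (uint8_t)(clamp8(qs0 - filter1) ^ 0x80);
  *op0 = (uint8_t)(clamp8(ps0 + filter2) ^ 0x80);
  if (!hev) {  /* outer pixels only move on low-variance edges */
    const int8_t f = (int8_t)((filter1 + 1) >> 1);
    *oq1 = (uint8_t)(clamp8(qs1 - f) ^ 0x80);
    *op1 = (uint8_t)(clamp8(ps1 + f) ^ 0x80);
  }
}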
@@ -101,7 +101,7 @@ ; q6 op0 ; q7 oq0 ; q8 oq1 -|vpx_loop_filter_neon_16| PROC +|aom_loop_filter_neon_16| PROC ; filter_mask vabd.u8 q11, q3, q4 ; m1 = abs(p3 - p2) @@ -194,6 +194,6 @@ veor q8, q12, q10 ; *oq1 = u^0x80 bx lr - ENDP ; |vpx_loop_filter_neon_16| + ENDP ; |aom_loop_filter_neon_16| END diff --git a/aom_dsp/arm/loopfilter_16_neon.c b/aom_dsp/arm/loopfilter_16_neon.c index 36f34fea2ed030aa08efb4442f5e364b82f4a944..686d0fa04ed52e55a0d4057374edcd8617c4c1ea 100644 --- a/aom_dsp/arm/loopfilter_16_neon.c +++ b/aom_dsp/arm/loopfilter_16_neon.c @@ -11,9 +11,9 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" static INLINE void loop_filter_neon_16(uint8x16_t qblimit, // blimit uint8x16_t qlimit, // limit @@ -123,7 +123,7 @@ static INLINE void loop_filter_neon_16(uint8x16_t qblimit, // blimit return; } -void vpx_lpf_horizontal_4_dual_neon( +void aom_lpf_horizontal_4_dual_neon( uint8_t *s, int p /* pitch */, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { diff --git a/aom_dsp/arm/loopfilter_4_neon.asm b/aom_dsp/arm/loopfilter_4_neon.asm index e45e34cd4c947f2ca331b2a3108116b09572ce44..1de9b3520cea7ffd1513b0907a2fe226f3f7e370 100644 --- a/aom_dsp/arm/loopfilter_4_neon.asm +++ b/aom_dsp/arm/loopfilter_4_neon.asm @@ -8,18 +8,18 @@ ; be found in the AUTHORS file in the root of the source tree. ; - EXPORT |vpx_lpf_horizontal_4_neon| - EXPORT |vpx_lpf_vertical_4_neon| + EXPORT |aom_lpf_horizontal_4_neon| + EXPORT |aom_lpf_vertical_4_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 -; Currently vpx only works on iterations 8 at a time. The vp8 loop filter +; Currently aom only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. ; TODO(fgalligan): See about removing the count code as this function is only ; called with a count of 1. ; -; void vpx_lpf_horizontal_4_neon(uint8_t *s, +; void aom_lpf_horizontal_4_neon(uint8_t *s, ; int p /* pitch */, ; const uint8_t *blimit, ; const uint8_t *limit, @@ -32,7 +32,7 @@ ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vpx_lpf_horizontal_4_neon| PROC +|aom_lpf_horizontal_4_neon| PROC push {lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -41,7 +41,7 @@ add r1, r1, r1 ; double pitch cmp r12, #0 - beq end_vpx_lf_h_edge + beq end_aom_lf_h_edge vld1.8 {d1[]}, [r3] ; duplicate *limit vld1.8 {d2[]}, [r2] ; duplicate *thresh @@ -62,7 +62,7 @@ count_lf_h_loop sub r2, r2, r1, lsl #1 sub r3, r3, r1, lsl #1 - bl vpx_loop_filter_neon + bl aom_loop_filter_neon vst1.u8 {d4}, [r2@64], r1 ; store op1 vst1.u8 {d5}, [r3@64], r1 ; store op0 @@ -73,16 +73,16 @@ count_lf_h_loop subs r12, r12, #1 bne count_lf_h_loop -end_vpx_lf_h_edge +end_aom_lf_h_edge pop {pc} - ENDP ; |vpx_lpf_horizontal_4_neon| + ENDP ; |aom_lpf_horizontal_4_neon| -; Currently vpx only works on iterations 8 at a time. The vp8 loop filter +; Currently aom only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. ; TODO(fgalligan): See about removing the count code as this function is only ; called with a count of 1. 
; -; void vpx_lpf_vertical_4_neon(uint8_t *s, +; void aom_lpf_vertical_4_neon(uint8_t *s, ; int p /* pitch */, ; const uint8_t *blimit, ; const uint8_t *limit, @@ -95,7 +95,7 @@ end_vpx_lf_h_edge ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vpx_lpf_vertical_4_neon| PROC +|aom_lpf_vertical_4_neon| PROC push {lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -105,7 +105,7 @@ end_vpx_lf_h_edge ldr r3, [sp, #4] ; load thresh sub r2, r0, #4 ; move s pointer down by 4 columns cmp r12, #0 - beq end_vpx_lf_v_edge + beq end_aom_lf_v_edge vld1.8 {d2[]}, [r3] ; duplicate *thresh @@ -135,7 +135,7 @@ count_lf_v_loop vtrn.8 d7, d16 vtrn.8 d17, d18 - bl vpx_loop_filter_neon + bl aom_loop_filter_neon sub r0, r0, #2 @@ -154,11 +154,11 @@ count_lf_v_loop subne r2, r0, #4 ; move s pointer down by 4 columns bne count_lf_v_loop -end_vpx_lf_v_edge +end_aom_lf_v_edge pop {pc} - ENDP ; |vpx_lpf_vertical_4_neon| + ENDP ; |aom_lpf_vertical_4_neon| -; void vpx_loop_filter_neon(); +; void aom_loop_filter_neon(); ; This is a helper function for the loopfilters. The invidual functions do the ; necessary load, transpose (if necessary) and store. The function does not use ; registers d8-d15. @@ -182,7 +182,7 @@ end_vpx_lf_v_edge ; d5 op0 ; d6 oq0 ; d7 oq1 -|vpx_loop_filter_neon| PROC +|aom_loop_filter_neon| PROC ; filter_mask vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2) vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1) @@ -272,6 +272,6 @@ end_vpx_lf_v_edge veor d7, d20, d18 ; *oq1 = u^0x80 bx lr - ENDP ; |vpx_loop_filter_neon| + ENDP ; |aom_loop_filter_neon| END diff --git a/aom_dsp/arm/loopfilter_4_neon.c b/aom_dsp/arm/loopfilter_4_neon.c index 9be39ac084de06279e9ac35849011c1bdbfa5600..cc75228303d23478cdfd58e29a4b8d44a67897c7 100644 --- a/aom_dsp/arm/loopfilter_4_neon.c +++ b/aom_dsp/arm/loopfilter_4_neon.c @@ -11,7 +11,7 @@ #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" static INLINE void loop_filter_neon(uint8x8_t dblimit, // flimit uint8x8_t dlimit, // limit @@ -108,7 +108,7 @@ static INLINE void loop_filter_neon(uint8x8_t dblimit, // flimit return; } -void vpx_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, +void aom_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -116,7 +116,7 @@ void vpx_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, uint8x8_t dblimit, dlimit, dthresh; uint8x8_t d3u8, d4u8, d5u8, d6u8, d7u8, d16u8, d17u8, d18u8; - if (count == 0) // end_vpx_lf_h_edge + if (count == 0) // end_aom_lf_h_edge return; dblimit = vld1_u8(blimit); @@ -158,7 +158,7 @@ void vpx_lpf_horizontal_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, return; } -void vpx_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, +void aom_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i, pitch8; @@ -170,7 +170,7 @@ void vpx_lpf_vertical_4_neon(uint8_t *src, int pitch, const uint8_t *blimit, uint8x8x2_t d2tmp8, d2tmp9, d2tmp10, d2tmp11; uint8x8x4_t d4Result; - if (count == 0) // end_vpx_lf_h_edge + if (count == 0) // end_aom_lf_h_edge return; dblimit = vld1_u8(blimit); diff --git a/aom_dsp/arm/loopfilter_8_neon.asm b/aom_dsp/arm/loopfilter_8_neon.asm index e81734c046029e2c75d21e6b93bb53f3ec30ea91..512e5d4626f15c21f2fe1cf3036cbb509031b6f0 100644 --- a/aom_dsp/arm/loopfilter_8_neon.asm +++ b/aom_dsp/arm/loopfilter_8_neon.asm @@ -8,18 +8,18 @@ ; be found in the AUTHORS file in 
the root of the source tree. ; - EXPORT |vpx_lpf_horizontal_8_neon| - EXPORT |vpx_lpf_vertical_8_neon| + EXPORT |aom_lpf_horizontal_8_neon| + EXPORT |aom_lpf_vertical_8_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 -; Currently vpx only works on iterations 8 at a time. The vp8 loop filter +; Currently aom only works on iterations 8 at a time. The vp8 loop filter ; works on 16 iterations at a time. ; TODO(fgalligan): See about removing the count code as this function is only ; called with a count of 1. ; -; void vpx_lpf_horizontal_8_neon(uint8_t *s, int p, +; void aom_lpf_horizontal_8_neon(uint8_t *s, int p, ; const uint8_t *blimit, ; const uint8_t *limit, ; const uint8_t *thresh, @@ -30,7 +30,7 @@ ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vpx_lpf_horizontal_8_neon| PROC +|aom_lpf_horizontal_8_neon| PROC push {r4-r5, lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -39,7 +39,7 @@ add r1, r1, r1 ; double pitch cmp r12, #0 - beq end_vpx_mblf_h_edge + beq end_aom_mblf_h_edge vld1.8 {d1[]}, [r3] ; duplicate *limit vld1.8 {d2[]}, [r2] ; duplicate *thresh @@ -60,7 +60,7 @@ count_mblf_h_loop sub r3, r3, r1, lsl #1 sub r2, r2, r1, lsl #2 - bl vpx_mbloop_filter_neon + bl aom_mbloop_filter_neon vst1.u8 {d0}, [r2@64], r1 ; store op2 vst1.u8 {d1}, [r3@64], r1 ; store op1 @@ -73,12 +73,12 @@ count_mblf_h_loop subs r12, r12, #1 bne count_mblf_h_loop -end_vpx_mblf_h_edge +end_aom_mblf_h_edge pop {r4-r5, pc} - ENDP ; |vpx_lpf_horizontal_8_neon| + ENDP ; |aom_lpf_horizontal_8_neon| -; void vpx_lpf_vertical_8_neon(uint8_t *s, +; void aom_lpf_vertical_8_neon(uint8_t *s, ; int pitch, ; const uint8_t *blimit, ; const uint8_t *limit, @@ -91,7 +91,7 @@ end_vpx_mblf_h_edge ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, ; sp+4 int count -|vpx_lpf_vertical_8_neon| PROC +|aom_lpf_vertical_8_neon| PROC push {r4-r5, lr} vld1.8 {d0[]}, [r2] ; duplicate *blimit @@ -101,7 +101,7 @@ end_vpx_mblf_h_edge ldr r3, [sp, #12] ; load thresh sub r2, r0, #4 ; move s pointer down by 4 columns cmp r12, #0 - beq end_vpx_mblf_v_edge + beq end_aom_mblf_v_edge vld1.8 {d2[]}, [r3] ; duplicate *thresh @@ -134,7 +134,7 @@ count_mblf_v_loop sub r2, r0, #3 add r3, r0, #1 - bl vpx_mbloop_filter_neon + bl aom_mbloop_filter_neon ;store op2, op1, op0, oq0 vst4.8 {d0[0], d1[0], d2[0], d3[0]}, [r2], r1 @@ -161,11 +161,11 @@ count_mblf_v_loop subne r2, r0, #4 ; move s pointer down by 4 columns bne count_mblf_v_loop -end_vpx_mblf_v_edge +end_aom_mblf_v_edge pop {r4-r5, pc} - ENDP ; |vpx_lpf_vertical_8_neon| + ENDP ; |aom_lpf_vertical_8_neon| -; void vpx_mbloop_filter_neon(); +; void aom_mbloop_filter_neon(); ; This is a helper function for the loopfilters. The invidual functions do the ; necessary load, transpose (if necessary) and store. The function does not use ; registers d8-d15. 
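The "mb" (8-pixel) filter renamed in this file is the medium-strength variant: when the mask and flatness checks pass, the three pixels on each side of the edge are replaced with rounded 8-tap averages of a window sliding across p3..q3; otherwise it falls back to the 4-tap filter sketched earlier. The taps below follow the common scalar reference for this filter and are shown only as a sketch of the flat branch; the NEON body interleaves the same sums across eight pixels at once.

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

static void mb_filter_flat_sketch(uint8_t *op2, uint8_t *op1, uint8_t *op0,
                                  uint8_t *oq0, uint8_t *oq1, uint8_t *oq2,
                                  uint8_t p3, uint8_t q3) {
  /* Read the originals before any output is overwritten. */
  const int p2 = *op2, p1 = *op1, p0 = *op0, q0 = *oq0, q1 = *oq1, q2 = *oq2;
  *op2 = (uint8_t)ROUND_POWER_OF_TWO(p3 + p3 + p3 + 2 * p2 + p1 + p0 + q0, 3);
  *op1 = (uint8_t)ROUND_POWER_OF_TWO(p3 + p3 + p2 + 2 * p1 + p0 + q0 + q1, 3);
  *op0 = (uint8_t)ROUND_POWER_OF_TWO(p3 + p2 + p1 + 2 * p0 + q0 + q1 + q2, 3);
  *oq0 = (uint8_t)ROUND_POWER_OF_TWO(p2 + p1 + p0 + 2 * q0 + q1 + q2 + q3, 3);
  *oq1 = (uint8_t)ROUND_POWER_OF_TWO(p1 + p0 + q0 + 2 * q1 + q2 + q3 + q3, 3);
  *oq2 = (uint8_t)ROUND_POWER_OF_TWO(p0 + q0 + q1 + 2 * q2 + q3 + q3 + q3, 3);
}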
@@ -191,7 +191,7 @@ end_vpx_mblf_v_edge ; d3 oq0 ; d4 oq1 ; d5 oq2 -|vpx_mbloop_filter_neon| PROC +|aom_mbloop_filter_neon| PROC ; filter_mask vabd.u8 d19, d3, d4 ; m1 = abs(p3 - p2) vabd.u8 d20, d4, d5 ; m2 = abs(p2 - p1) @@ -446,6 +446,6 @@ filter_branch_only bx lr - ENDP ; |vpx_mbloop_filter_neon| + ENDP ; |aom_mbloop_filter_neon| END diff --git a/aom_dsp/arm/loopfilter_8_neon.c b/aom_dsp/arm/loopfilter_8_neon.c index 9065c6d3e50bde86364067292aaa695a87458d15..1551adb679ce9570418c807d77274359bfa1072b 100644 --- a/aom_dsp/arm/loopfilter_8_neon.c +++ b/aom_dsp/arm/loopfilter_8_neon.c @@ -11,7 +11,7 @@ #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" static INLINE void mbloop_filter_neon(uint8x8_t dblimit, // mblimit uint8x8_t dlimit, // limit @@ -260,7 +260,7 @@ static INLINE void mbloop_filter_neon(uint8x8_t dblimit, // mblimit return; } -void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, +void aom_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -269,7 +269,7 @@ void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8; uint8x8_t d16u8, d17u8, d18u8; - if (count == 0) // end_vpx_mblf_h_edge + if (count == 0) // end_aom_mblf_h_edge return; dblimit = vld1_u8(blimit); @@ -316,7 +316,7 @@ void vpx_lpf_horizontal_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, return; } -void vpx_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, +void aom_lpf_vertical_8_neon(uint8_t *src, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; diff --git a/aom_dsp/arm/loopfilter_mb_neon.asm b/aom_dsp/arm/loopfilter_mb_neon.asm index 20d9cfb1133ba81b7499bc5df5425f5218b600fd..3c7c039acf792176f306277365ccf855ea76ec23 100644 --- a/aom_dsp/arm/loopfilter_mb_neon.asm +++ b/aom_dsp/arm/loopfilter_mb_neon.asm @@ -8,13 +8,13 @@ ; be found in the AUTHORS file in the root of the source tree. 
; - EXPORT |vpx_lpf_horizontal_16_neon| - EXPORT |vpx_lpf_vertical_16_neon| + EXPORT |aom_lpf_horizontal_16_neon| + EXPORT |aom_lpf_vertical_16_neon| ARM AREA ||.text||, CODE, READONLY, ALIGN=2 -; void vpx_lpf_horizontal_16_neon(uint8_t *s, int p, +; void aom_lpf_horizontal_16_neon(uint8_t *s, int p, ; const uint8_t *blimit, ; const uint8_t *limit, ; const uint8_t *thresh @@ -24,7 +24,7 @@ ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -|vpx_lpf_horizontal_16_neon| PROC +|aom_lpf_horizontal_16_neon| PROC push {r4-r8, lr} vpush {d8-d15} ldr r4, [sp, #88] ; load thresh @@ -54,7 +54,7 @@ h_count vld1.u8 {d14}, [r8@64], r1 ; q6 vld1.u8 {d15}, [r8@64], r1 ; q7 - bl vpx_wide_mbfilter_neon + bl aom_wide_mbfilter_neon tst r7, #1 beq h_mbfilter @@ -115,9 +115,9 @@ h_next vpop {d8-d15} pop {r4-r8, pc} - ENDP ; |vpx_lpf_horizontal_16_neon| + ENDP ; |aom_lpf_horizontal_16_neon| -; void vpx_lpf_vertical_16_neon(uint8_t *s, int p, +; void aom_lpf_vertical_16_neon(uint8_t *s, int p, ; const uint8_t *blimit, ; const uint8_t *limit, ; const uint8_t *thresh) @@ -126,7 +126,7 @@ h_next ; r2 const uint8_t *blimit, ; r3 const uint8_t *limit, ; sp const uint8_t *thresh, -|vpx_lpf_vertical_16_neon| PROC +|aom_lpf_vertical_16_neon| PROC push {r4-r8, lr} vpush {d8-d15} ldr r4, [sp, #88] ; load thresh @@ -176,7 +176,7 @@ h_next vtrn.8 d12, d13 vtrn.8 d14, d15 - bl vpx_wide_mbfilter_neon + bl aom_wide_mbfilter_neon tst r7, #1 beq v_mbfilter @@ -279,9 +279,9 @@ v_end vpop {d8-d15} pop {r4-r8, pc} - ENDP ; |vpx_lpf_vertical_16_neon| + ENDP ; |aom_lpf_vertical_16_neon| -; void vpx_wide_mbfilter_neon(); +; void aom_wide_mbfilter_neon(); ; This is a helper function for the loopfilters. The invidual functions do the ; necessary load, transpose (if necessary) and store. 
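The wide filter exported here layers a third strength on top of the 4-tap and 8-tap variants: it looks a full eight pixels deep on each side of the edge and only applies its strongest smoothing when the neighbourhood is flat that far out. The sketch below shows the selection logic per pixel column; the helper names, the flatness threshold of 1 and the way the result is consumed are illustrative assumptions, and the real code evaluates all of this with vector masks rather than branches.

#include <stdint.h>
#include <stdlib.h>

/* p[0] is p0 and p[i] is the pixel i steps away from the edge; same for q. */
static int is_flat_sketch(const uint8_t *p, const uint8_t *q, int taps) {
  for (int i = 1; i <= taps; ++i)
    if (abs(p[i] - p[0]) > 1 || abs(q[i] - q[0]) > 1) return 0;
  return 1;
}

/* Returns the filter width a column would receive (0 means untouched). */
static int choose_filter_sketch(int mask, const uint8_t *p, const uint8_t *q) {
  if (!mask) return 0;                       /* local activity too high */
  if (is_flat_sketch(p, q, 3)) {
    if (is_flat_sketch(p, q, 7)) return 16;  /* widest filter, rewrites p6..q6 */
    return 8;                                /* 8-tap averages, rewrites p2..q2 */
  }
  return 4;                                  /* narrow filter, nudges p1..q1 only */
}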
; @@ -305,7 +305,7 @@ v_end ; d13 q5 ; d14 q6 ; d15 q7 -|vpx_wide_mbfilter_neon| PROC +|aom_wide_mbfilter_neon| PROC mov r7, #0 ; filter_mask @@ -601,6 +601,6 @@ v_end vbif d3, d14, d17 ; oq6 |= q6 & ~(f2 & f & m) bx lr - ENDP ; |vpx_wide_mbfilter_neon| + ENDP ; |aom_wide_mbfilter_neon| END diff --git a/aom_dsp/arm/loopfilter_neon.c b/aom_dsp/arm/loopfilter_neon.c index 2f4c055c29432afc3c116b808b8093ef67882b64..15b093487e951b4152510b04c05d0b7b2fbbb4af 100644 --- a/aom_dsp/arm/loopfilter_neon.c +++ b/aom_dsp/arm/loopfilter_neon.c @@ -11,39 +11,39 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" -void vpx_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_vertical_4_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); + aom_lpf_vertical_4_neon(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_vertical_4_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); } #if HAVE_NEON_ASM -void vpx_lpf_horizontal_8_dual_neon( +void aom_lpf_horizontal_8_dual_neon( uint8_t *s, int p /* pitch */, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1); + aom_lpf_horizontal_8_neon(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_horizontal_8_neon(s + 8, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_vertical_8_dual_neon(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); + aom_lpf_vertical_8_neon(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_vertical_8_neon(s + 8 * p, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit, +void aom_lpf_vertical_16_dual_neon(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { - vpx_lpf_vertical_16_neon(s, p, blimit, limit, thresh); - vpx_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh); + aom_lpf_vertical_16_neon(s, p, blimit, limit, thresh); + aom_lpf_vertical_16_neon(s + 8 * p, p, blimit, limit, thresh); } #endif // HAVE_NEON_ASM diff --git a/aom_dsp/arm/sad4d_neon.c b/aom_dsp/arm/sad4d_neon.c index e5df5fef69730a965262ec0976856cff698719b2..a1eeaf4b77e3f9ee0264f86a30aa54e1c60c8543 100644 --- a/aom_dsp/arm/sad4d_neon.c +++ b/aom_dsp/arm/sad4d_neon.c @@ -11,9 +11,9 @@ #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo, const uint16x8_t vec_hi) { @@ -79,7 +79,7 @@ static void sad_neon_32(const uint8x16_t vec_src_00, vget_high_u8(vec_ref_16)); } -void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride, +void aom_sad64x64x4d_neon(const 
uint8_t *src, int src_stride, const uint8_t *const ref[4], int ref_stride, uint32_t *res) { int i; @@ -125,7 +125,7 @@ void vpx_sad64x64x4d_neon(const uint8_t *src, int src_stride, res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi); } -void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride, +void aom_sad32x32x4d_neon(const uint8_t *src, int src_stride, const uint8_t *const ref[4], int ref_stride, uint32_t *res) { int i; @@ -169,7 +169,7 @@ void vpx_sad32x32x4d_neon(const uint8_t *src, int src_stride, res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi); } -void vpx_sad16x16x4d_neon(const uint8_t *src, int src_stride, +void aom_sad16x16x4d_neon(const uint8_t *src, int src_stride, const uint8_t *const ref[4], int ref_stride, uint32_t *res) { int i; diff --git a/aom_dsp/arm/sad_media.asm b/aom_dsp/arm/sad_media.asm index aed1d3a22ed54f2bc84abaf375f00f9cff9c8365..9d815a215f7577ea96032212bb5fc83531ed8428 100644 --- a/aom_dsp/arm/sad_media.asm +++ b/aom_dsp/arm/sad_media.asm @@ -9,7 +9,7 @@ ; - EXPORT |vpx_sad16x16_media| + EXPORT |aom_sad16x16_media| ARM REQUIRE8 @@ -21,7 +21,7 @@ ; r1 int src_stride ; r2 const unsigned char *ref_ptr ; r3 int ref_stride -|vpx_sad16x16_media| PROC +|aom_sad16x16_media| PROC stmfd sp!, {r4-r12, lr} pld [r0, r1, lsl #0] diff --git a/aom_dsp/arm/sad_neon.c b/aom_dsp/arm/sad_neon.c index c37fe307046c6ce7dc0ae014fea5b3379b19c05b..2f452f55b5172116bb5a9f5f7323a98486f43508 100644 --- a/aom_dsp/arm/sad_neon.c +++ b/aom_dsp/arm/sad_neon.c @@ -11,11 +11,11 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" -unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride, +unsigned int aom_sad8x16_neon(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride) { uint8x8_t d0, d8; uint16x8_t q12; @@ -46,7 +46,7 @@ unsigned int vpx_sad8x16_neon(unsigned char *src_ptr, int src_stride, return vget_lane_u32(d5, 0); } -unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride, +unsigned int aom_sad4x4_neon(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride) { uint8x8_t d0, d8; uint16x8_t q12; @@ -74,7 +74,7 @@ unsigned int vpx_sad4x4_neon(unsigned char *src_ptr, int src_stride, return vget_lane_u32(vreinterpret_u32_u64(d3), 0); } -unsigned int vpx_sad16x8_neon(unsigned char *src_ptr, int src_stride, +unsigned int aom_sad16x8_neon(unsigned char *src_ptr, int src_stride, unsigned char *ref_ptr, int ref_stride) { uint8x16_t q0, q4; uint16x8_t q12, q13; @@ -128,7 +128,7 @@ static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) { return vget_lane_u32(c, 0); } -unsigned int vpx_sad64x64_neon(const uint8_t *src, int src_stride, +unsigned int aom_sad64x64_neon(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride) { int i; uint16x8_t vec_accum_lo = vdupq_n_u16(0); @@ -164,7 +164,7 @@ unsigned int vpx_sad64x64_neon(const uint8_t *src, int src_stride, return horizontal_long_add_16x8(vec_accum_lo, vec_accum_hi); } -unsigned int vpx_sad32x32_neon(const uint8_t *src, int src_stride, +unsigned int aom_sad32x32_neon(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride) { int i; uint16x8_t vec_accum_lo = vdupq_n_u16(0); @@ -189,7 +189,7 @@ unsigned int vpx_sad32x32_neon(const uint8_t *src, int src_stride, return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi)); } -unsigned int vpx_sad16x16_neon(const uint8_t *src, int src_stride, +unsigned int 
aom_sad16x16_neon(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride) { int i; uint16x8_t vec_accum_lo = vdupq_n_u16(0); @@ -208,7 +208,7 @@ unsigned int vpx_sad16x16_neon(const uint8_t *src, int src_stride, return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi)); } -unsigned int vpx_sad8x8_neon(const uint8_t *src, int src_stride, +unsigned int aom_sad8x8_neon(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride) { int i; uint16x8_t vec_accum = vdupq_n_u16(0); diff --git a/aom_dsp/arm/save_reg_neon.asm b/aom_dsp/arm/save_reg_neon.asm index c9ca10801df49cdbd00f70dc21b51da99cc3d194..b802792dafa46f94c744a77e4f3281dae0da2949 100644 --- a/aom_dsp/arm/save_reg_neon.asm +++ b/aom_dsp/arm/save_reg_neon.asm @@ -9,8 +9,8 @@ ; - EXPORT |vpx_push_neon| - EXPORT |vpx_pop_neon| + EXPORT |aom_push_neon| + EXPORT |aom_pop_neon| ARM REQUIRE8 @@ -18,14 +18,14 @@ AREA ||.text||, CODE, READONLY, ALIGN=2 -|vpx_push_neon| PROC +|aom_push_neon| PROC vst1.i64 {d8, d9, d10, d11}, [r0]! vst1.i64 {d12, d13, d14, d15}, [r0]! bx lr ENDP -|vpx_pop_neon| PROC +|aom_pop_neon| PROC vld1.i64 {d8, d9, d10, d11}, [r0]! vld1.i64 {d12, d13, d14, d15}, [r0]! bx lr diff --git a/aom_dsp/arm/subpel_variance_media.c b/aom_dsp/arm/subpel_variance_media.c index 57119b6a28271b5cb67c8e6425db1e16e9d29e8e..46ec028d3703cf9cc8194acdb16ddc9719f7ea14 100644 --- a/aom_dsp/arm/subpel_variance_media.c +++ b/aom_dsp/arm/subpel_variance_media.c @@ -9,9 +9,9 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" #if HAVE_MEDIA @@ -20,15 +20,15 @@ static const int16_t bilinear_filters_media[8][2] = { { 128, 0 }, { 112, 16 }, { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 } }; -extern void vpx_filter_block2d_bil_first_pass_media( +extern void aom_filter_block2d_bil_first_pass_media( const uint8_t *src_ptr, uint16_t *dst_ptr, uint32_t src_pitch, uint32_t height, uint32_t width, const int16_t *filter); -extern void vpx_filter_block2d_bil_second_pass_media( +extern void aom_filter_block2d_bil_second_pass_media( const uint16_t *src_ptr, uint8_t *dst_ptr, int32_t src_pitch, uint32_t height, uint32_t width, const int16_t *filter); -unsigned int vpx_sub_pixel_variance8x8_media( +unsigned int aom_sub_pixel_variance8x8_media( const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) { uint16_t first_pass[10 * 8]; @@ -38,16 +38,16 @@ unsigned int vpx_sub_pixel_variance8x8_media( HFilter = bilinear_filters_media[xoffset]; VFilter = bilinear_filters_media[yoffset]; - vpx_filter_block2d_bil_first_pass_media(src_ptr, first_pass, + aom_filter_block2d_bil_first_pass_media(src_ptr, first_pass, src_pixels_per_line, 9, 8, HFilter); - vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8, + aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 8, 8, 8, VFilter); - return vpx_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line, + return aom_variance8x8_media(second_pass, 8, dst_ptr, dst_pixels_per_line, sse); } -unsigned int vpx_sub_pixel_variance16x16_media( +unsigned int aom_sub_pixel_variance16x16_media( const uint8_t *src_ptr, int src_pixels_per_line, int xoffset, int yoffset, const uint8_t *dst_ptr, int dst_pixels_per_line, unsigned int *sse) { uint16_t first_pass[36 * 16]; @@ 
-56,24 +56,24 @@ unsigned int vpx_sub_pixel_variance16x16_media( unsigned int var; if (xoffset == 4 && yoffset == 0) { - var = vpx_variance_halfpixvar16x16_h_media( + var = aom_variance_halfpixvar16x16_h_media( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse); } else if (xoffset == 0 && yoffset == 4) { - var = vpx_variance_halfpixvar16x16_v_media( + var = aom_variance_halfpixvar16x16_v_media( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse); } else if (xoffset == 4 && yoffset == 4) { - var = vpx_variance_halfpixvar16x16_hv_media( + var = aom_variance_halfpixvar16x16_hv_media( src_ptr, src_pixels_per_line, dst_ptr, dst_pixels_per_line, sse); } else { HFilter = bilinear_filters_media[xoffset]; VFilter = bilinear_filters_media[yoffset]; - vpx_filter_block2d_bil_first_pass_media( + aom_filter_block2d_bil_first_pass_media( src_ptr, first_pass, src_pixels_per_line, 17, 16, HFilter); - vpx_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16, + aom_filter_block2d_bil_second_pass_media(first_pass, second_pass, 16, 16, 16, VFilter); - var = vpx_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line, + var = aom_variance16x16_media(second_pass, 16, dst_ptr, dst_pixels_per_line, sse); } return var; diff --git a/aom_dsp/arm/subpel_variance_neon.c b/aom_dsp/arm/subpel_variance_neon.c index 8c5f0f492214636e62f8c515a9212b9c5944bbaf..064b72d6fcc70e7ad174fa37a91f17dae07e56f6 100644 --- a/aom_dsp/arm/subpel_variance_neon.c +++ b/aom_dsp/arm/subpel_variance_neon.c @@ -10,11 +10,11 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" +#include "./aom_dsp_rtcd.h" +#include "./aom_config.h" #include "aom_ports/mem.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #include "aom_dsp/variance.h" @@ -74,7 +74,7 @@ static void var_filter_block2d_bil_w16(const uint8_t *src_ptr, } } -unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride, +unsigned int aom_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *dst, int dst_stride, unsigned int *sse) { @@ -85,10 +85,10 @@ unsigned int vpx_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride, bilinear_filters[xoffset]); var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8, 8, bilinear_filters[yoffset]); - return vpx_variance8x8_neon(temp2, 8, dst, dst_stride, sse); + return aom_variance8x8_neon(temp2, 8, dst, dst_stride, sse); } -unsigned int vpx_sub_pixel_variance16x16_neon(const uint8_t *src, +unsigned int aom_sub_pixel_variance16x16_neon(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *dst, int dst_stride, @@ -100,10 +100,10 @@ unsigned int vpx_sub_pixel_variance16x16_neon(const uint8_t *src, bilinear_filters[xoffset]); var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, 16, bilinear_filters[yoffset]); - return vpx_variance16x16_neon(temp2, 16, dst, dst_stride, sse); + return aom_variance16x16_neon(temp2, 16, dst, dst_stride, sse); } -unsigned int vpx_sub_pixel_variance32x32_neon(const uint8_t *src, +unsigned int aom_sub_pixel_variance32x32_neon(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *dst, int dst_stride, @@ -115,10 +115,10 @@ unsigned int vpx_sub_pixel_variance32x32_neon(const uint8_t *src, bilinear_filters[xoffset]); var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, 32, bilinear_filters[yoffset]); - return vpx_variance32x32_neon(temp2, 32, dst, dst_stride, sse); + return aom_variance32x32_neon(temp2, 32, dst, dst_stride, sse); } 
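The sub-pixel variance kernels renamed above all follow the same two-pass scheme: a first pass filters width x (height + 1) source pixels horizontally with the bilinear tap pair selected by xoffset (in 1/8-pel steps), a second pass filters that intermediate block vertically with the yoffset taps, and the filtered block is handed to the plain variance kernel. The scalar sketch below shows the 8x8 case; the *_sketch names are illustrative, the tap table is the standard eighth-pel bilinear set (each pair sums to 128, so FILTER_BITS is 7), and the closing line uses the same sse - sum*sum/N identity the aom_variance* kernels compute. The media version additionally short-circuits the exact half-pel offsets (offset 4) to dedicated halfpixvar kernels.

#include <stdint.h>

#define FILTER_BITS 7  /* bilinear taps sum to 128 */

static void bil_filter_sketch(const uint8_t *src, int src_stride, uint8_t *dst,
                              int w, int h, int pixel_step, const uint8_t *filter) {
  for (int r = 0; r < h; ++r) {
    for (int c = 0; c < w; ++c) {
      const int sum = src[c] * filter[0] + src[c + pixel_step] * filter[1];
      dst[c] = (uint8_t)((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
    }
    src += src_stride;
    dst += w;
  }
}

static unsigned int subpel_variance8x8_sketch(const uint8_t *src, int src_stride,
                                              int xoffset, int yoffset,
                                              const uint8_t *ref, int ref_stride) {
  static const uint8_t bilinear[8][2] = { { 128, 0 }, { 112, 16 }, { 96, 32 },
                                          { 80, 48 }, { 64, 64 }, { 48, 80 },
                                          { 32, 96 }, { 16, 112 } };
  uint8_t hpass[9 * 8], vpass[8 * 8];
  bil_filter_sketch(src, src_stride, hpass, 8, 9, 1, bilinear[xoffset]);  /* horizontal */
  bil_filter_sketch(hpass, 8, vpass, 8, 8, 8, bilinear[yoffset]);         /* vertical   */

  int64_t sum = 0, sse = 0;
  for (int r = 0; r < 8; ++r)
    for (int c = 0; c < 8; ++c) {
      const int d = vpass[r * 8 + c] - ref[r * ref_stride + c];
      sum += d;
      sse += d * d;
    }
  return (unsigned int)(sse - ((sum * sum) >> 6));  /* >> 6 == / (8 * 8) */
}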
-unsigned int vpx_sub_pixel_variance64x64_neon(const uint8_t *src, +unsigned int aom_sub_pixel_variance64x64_neon(const uint8_t *src, int src_stride, int xoffset, int yoffset, const uint8_t *dst, int dst_stride, @@ -130,5 +130,5 @@ unsigned int vpx_sub_pixel_variance64x64_neon(const uint8_t *src, bilinear_filters[xoffset]); var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, 64, bilinear_filters[yoffset]); - return vpx_variance64x64_neon(temp2, 64, dst, dst_stride, sse); + return aom_variance64x64_neon(temp2, 64, dst, dst_stride, sse); } diff --git a/aom_dsp/arm/subtract_neon.c b/aom_dsp/arm/subtract_neon.c index 99d71365f5e255bac7c38711dbdd4dbb6df0df28..cb8a2daf8ad19526a6ff6942367147eed4fe74a4 100644 --- a/aom_dsp/arm/subtract_neon.c +++ b/aom_dsp/arm/subtract_neon.c @@ -11,10 +11,10 @@ #include -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" -void vpx_subtract_block_neon(int rows, int cols, int16_t *diff, +void aom_subtract_block_neon(int rows, int cols, int16_t *diff, ptrdiff_t diff_stride, const uint8_t *src, ptrdiff_t src_stride, const uint8_t *pred, ptrdiff_t pred_stride) { diff --git a/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm index dab845a204cc9a15113a1ea5ace7e975a80c288c..52214f79aac82c7d2dceb68e655303cdab14a480 100644 --- a/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm +++ b/aom_dsp/arm/variance_halfpixvar16x16_h_media.asm @@ -9,7 +9,7 @@ ; - EXPORT |vpx_variance_halfpixvar16x16_h_media| + EXPORT |aom_variance_halfpixvar16x16_h_media| ARM REQUIRE8 @@ -22,7 +22,7 @@ ; r2 unsigned char *ref_ptr ; r3 int recon_stride ; stack unsigned int *sse -|vpx_variance_halfpixvar16x16_h_media| PROC +|aom_variance_halfpixvar16x16_h_media| PROC stmfd sp!, {r4-r12, lr} diff --git a/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm index 01953b70949c1c191672390c0f97374f360373ca..a3f60fc3cec99f88161e9a27447e4a036b48df0b 100644 --- a/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm +++ b/aom_dsp/arm/variance_halfpixvar16x16_hv_media.asm @@ -9,7 +9,7 @@ ; - EXPORT |vpx_variance_halfpixvar16x16_hv_media| + EXPORT |aom_variance_halfpixvar16x16_hv_media| ARM REQUIRE8 @@ -22,7 +22,7 @@ ; r2 unsigned char *ref_ptr ; r3 int recon_stride ; stack unsigned int *sse -|vpx_variance_halfpixvar16x16_hv_media| PROC +|aom_variance_halfpixvar16x16_hv_media| PROC stmfd sp!, {r4-r12, lr} diff --git a/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm b/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm index 0d17acb38f5e3a6b5c6d0ff257c71a3dbb5eb9f8..b8071bef8dd0bfe9444ebd004ce5eaadeb0a9696 100644 --- a/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm +++ b/aom_dsp/arm/variance_halfpixvar16x16_v_media.asm @@ -9,7 +9,7 @@ ; - EXPORT |vpx_variance_halfpixvar16x16_v_media| + EXPORT |aom_variance_halfpixvar16x16_v_media| ARM REQUIRE8 @@ -22,7 +22,7 @@ ; r2 unsigned char *ref_ptr ; r3 int recon_stride ; stack unsigned int *sse -|vpx_variance_halfpixvar16x16_v_media| PROC +|aom_variance_halfpixvar16x16_v_media| PROC stmfd sp!, {r4-r12, lr} diff --git a/aom_dsp/arm/variance_media.asm b/aom_dsp/arm/variance_media.asm index f7f9e14b0a79935d96beeb3016b9237b4c5a2b9f..8a21fdc253ca178e331af08567d5930c21a5ddec 100644 --- a/aom_dsp/arm/variance_media.asm +++ b/aom_dsp/arm/variance_media.asm @@ -9,9 +9,9 @@ ; - EXPORT |vpx_variance16x16_media| - EXPORT |vpx_variance8x8_media| - EXPORT |vpx_mse16x16_media| + EXPORT |aom_variance16x16_media| + EXPORT 
|aom_variance8x8_media| + EXPORT |aom_mse16x16_media| ARM REQUIRE8 @@ -24,7 +24,7 @@ ; r2 unsigned char *ref_ptr ; r3 int recon_stride ; stack unsigned int *sse -|vpx_variance16x16_media| PROC +|aom_variance16x16_media| PROC stmfd sp!, {r4-r12, lr} @@ -157,7 +157,7 @@ loop16x16 ; r2 unsigned char *ref_ptr ; r3 int recon_stride ; stack unsigned int *sse -|vpx_variance8x8_media| PROC +|aom_variance8x8_media| PROC push {r4-r10, lr} @@ -241,10 +241,10 @@ loop8x8 ; r3 int recon_stride ; stack unsigned int *sse ; -;note: Based on vpx_variance16x16_media. In this function, sum is never used. +;note: Based on aom_variance16x16_media. In this function, sum is never used. ; So, we can remove this part of calculation. -|vpx_mse16x16_media| PROC +|aom_mse16x16_media| PROC push {r4-r9, lr} diff --git a/aom_dsp/arm/variance_neon.c b/aom_dsp/arm/variance_neon.c index d69f7cb1b257b46b4600186e7324787b2a529dfc..e6ebbc9fc0dc41557d3be79669661312dcecd2de 100644 --- a/aom_dsp/arm/variance_neon.c +++ b/aom_dsp/arm/variance_neon.c @@ -11,10 +11,10 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "./vpx_config.h" +#include "./aom_dsp_rtcd.h" +#include "./aom_config.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) { @@ -61,17 +61,17 @@ static void variance_neon_w8(const uint8_t *a, int a_stride, const uint8_t *b, *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi)); } -void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b, +void aom_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse, int *sum) { variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum); } -void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b, +void aom_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse, int *sum) { variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum); } -unsigned int vpx_variance8x8_neon(const uint8_t *a, int a_stride, +unsigned int aom_variance8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum; @@ -79,7 +79,7 @@ unsigned int vpx_variance8x8_neon(const uint8_t *a, int a_stride, return *sse - (((int64_t)sum * sum) >> 6); // >> 6 = / 8 * 8 } -unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride, +unsigned int aom_variance16x16_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum; @@ -87,7 +87,7 @@ unsigned int vpx_variance16x16_neon(const uint8_t *a, int a_stride, return *sse - (((int64_t)sum * sum) >> 8); // >> 8 = / 16 * 16 } -unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride, +unsigned int aom_variance32x32_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum; @@ -95,7 +95,7 @@ unsigned int vpx_variance32x32_neon(const uint8_t *a, int a_stride, return *sse - (((int64_t)sum * sum) >> 10); // >> 10 = / 32 * 32 } -unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride, +unsigned int aom_variance32x64_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum1, sum2; @@ -108,7 +108,7 @@ unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride, return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64 } -unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride, +unsigned int aom_variance64x32_neon(const uint8_t *a, int 
a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum1, sum2; @@ -121,7 +121,7 @@ unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride, return *sse - (((int64_t)sum1 * sum1) >> 11); // >> 11 = / 32 * 64 } -unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride, +unsigned int aom_variance64x64_neon(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse) { int sum1, sum2; @@ -145,7 +145,7 @@ unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride, return *sse - (((int64_t)sum1 * sum1) >> 12); // >> 12 = / 64 * 64 } -unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr, +unsigned int aom_variance16x8_neon(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *sse) { @@ -221,7 +221,7 @@ unsigned int vpx_variance16x8_neon(const unsigned char *src_ptr, return vget_lane_u32(d0u32, 0); } -unsigned int vpx_variance8x16_neon(const unsigned char *src_ptr, +unsigned int aom_variance8x16_neon(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *sse) { @@ -283,7 +283,7 @@ unsigned int vpx_variance8x16_neon(const unsigned char *src_ptr, return vget_lane_u32(d0u32, 0); } -unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride, +unsigned int aom_mse16x16_neon(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *sse) { int i; @@ -346,7 +346,7 @@ unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride, return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0); } -unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr, +unsigned int aom_get4x4sse_cs_neon(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride) { diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c index eda733a105101841f3e2fd1ea185fe820664b749..bbdb090d1496859647b2f7d1b64ed21aef918f88 100644 --- a/aom_dsp/avg.c +++ b/aom_dsp/avg.c @@ -10,10 +10,10 @@ */ #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" -unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) { +unsigned int aom_avg_8x8_c(const uint8_t *s, int p) { int i, j; int sum = 0; for (i = 0; i < 8; ++i, s += p) @@ -23,7 +23,7 @@ unsigned int vpx_avg_8x8_c(const uint8_t *s, int p) { return (sum + 32) >> 6; } -unsigned int vpx_avg_4x4_c(const uint8_t *s, int p) { +unsigned int aom_avg_4x4_c(const uint8_t *s, int p) { int i, j; int sum = 0; for (i = 0; i < 4; ++i, s += p) @@ -65,7 +65,7 @@ static void hadamard_col8(const int16_t *src_diff, int src_stride, coeff[5] = c3 - c7; } -void vpx_hadamard_8x8_c(int16_t const *src_diff, int src_stride, +void aom_hadamard_8x8_c(int16_t const *src_diff, int src_stride, int16_t *coeff) { int idx; int16_t buffer[64]; @@ -88,14 +88,14 @@ void vpx_hadamard_8x8_c(int16_t const *src_diff, int src_stride, } // In place 16x16 2D Hadamard transform -void vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride, +void aom_hadamard_16x16_c(int16_t const *src_diff, int src_stride, int16_t *coeff) { int idx; for (idx = 0; idx < 4; ++idx) { // src_diff: 9 bit, dynamic range [-255, 255] int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8; - vpx_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64); + aom_hadamard_8x8_c(src_ptr, src_stride, coeff + idx * 64); } // coeff: 15 bit, dynamic range [-16320, 16320] @@ -121,7 +121,7 @@ void 
vpx_hadamard_16x16_c(int16_t const *src_diff, int src_stride, // coeff: 16 bits, dynamic range [-32640, 32640]. // length: value range {16, 64, 256, 1024}. -int vpx_satd_c(const int16_t *coeff, int length) { +int aom_satd_c(const int16_t *coeff, int length) { int i; int satd = 0; for (i = 0; i < length; ++i) satd += abs(coeff[i]); @@ -132,7 +132,7 @@ int vpx_satd_c(const int16_t *coeff, int length) { // Integer projection onto row vectors. // height: value range {16, 32, 64}. -void vpx_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref, +void aom_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref, const int ref_stride, const int height) { int idx; const int norm_factor = height >> 1; @@ -148,7 +148,7 @@ void vpx_int_pro_row_c(int16_t hbuf[16], uint8_t const *ref, } // width: value range {16, 32, 64}. -int16_t vpx_int_pro_col_c(uint8_t const *ref, const int width) { +int16_t aom_int_pro_col_c(uint8_t const *ref, const int width) { int idx; int16_t sum = 0; // sum: 14 bit, dynamic range [0, 16320] @@ -159,7 +159,7 @@ int16_t vpx_int_pro_col_c(uint8_t const *ref, const int width) { // ref: [0 - 510] // src: [0 - 510] // bwl: {2, 3, 4} -int vpx_vector_var_c(int16_t const *ref, int16_t const *src, const int bwl) { +int aom_vector_var_c(int16_t const *ref, int16_t const *src, const int bwl) { int i; int width = 4 << bwl; int sse = 0, mean = 0, var; @@ -175,7 +175,7 @@ int vpx_vector_var_c(int16_t const *ref, int16_t const *src, const int bwl) { return var; } -void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, +void aom_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max) { int i, j; *min = 255; @@ -190,7 +190,7 @@ void vpx_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp, } #if CONFIG_VPX_HIGHBITDEPTH -unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) { +unsigned int aom_highbd_avg_8x8_c(const uint8_t *s8, int p) { int i, j; int sum = 0; const uint16_t *s = CONVERT_TO_SHORTPTR(s8); @@ -201,7 +201,7 @@ unsigned int vpx_highbd_avg_8x8_c(const uint8_t *s8, int p) { return (sum + 32) >> 6; } -unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) { +unsigned int aom_highbd_avg_4x4_c(const uint8_t *s8, int p) { int i, j; int sum = 0; const uint16_t *s = CONVERT_TO_SHORTPTR(s8); @@ -212,7 +212,7 @@ unsigned int vpx_highbd_avg_4x4_c(const uint8_t *s8, int p) { return (sum + 8) >> 4; } -void vpx_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8, +void aom_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8, int dp, int *min, int *max) { int i, j; const uint16_t *s = CONVERT_TO_SHORTPTR(s8); diff --git a/aom_dsp/bitreader.c b/aom_dsp/bitreader.c index 822776ebcc1862739fea360c6c29d26a00b99b45..142054221f3478e9ae83155d219a6b79fdc7e034 100644 --- a/aom_dsp/bitreader.c +++ b/aom_dsp/bitreader.c @@ -10,17 +10,17 @@ */ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/bitreader.h" #include "aom_dsp/prob.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #include "aom_util/endian_inl.h" -int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size, - vpx_decrypt_cb decrypt_cb, void *decrypt_state) { +int aom_reader_init(aom_reader *r, const uint8_t *buffer, size_t size, + aom_decrypt_cb decrypt_cb, void *decrypt_state) { if (size && !buffer) { return 1; } else { @@ -31,12 +31,12 @@ int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size, 
r->range = 255; r->decrypt_cb = decrypt_cb; r->decrypt_state = decrypt_state; - vpx_reader_fill(r); - return vpx_read_bit(r) != 0; // marker bit + aom_reader_fill(r); + return aom_read_bit(r) != 0; // marker bit } } -void vpx_reader_fill(vpx_reader *r) { +void aom_reader_fill(aom_reader *r) { const uint8_t *const buffer_end = r->buffer_end; const uint8_t *buffer = r->buffer; const uint8_t *buffer_start = buffer; @@ -91,7 +91,7 @@ void vpx_reader_fill(vpx_reader *r) { r->count = count; } -const uint8_t *vpx_reader_find_end(vpx_reader *r) { +const uint8_t *aom_reader_find_end(aom_reader *r) { // Find the end of the coded buffer while (r->count > CHAR_BIT && r->count < BD_VALUE_SIZE) { r->count -= CHAR_BIT; diff --git a/aom_dsp/bitreader.h b/aom_dsp/bitreader.h index 07ff7f9196e5bbb127c537cc1b5a7ef0a29ff069..897a09959ade5bbd96ca1bbc68be169ef1c98272 100644 --- a/aom_dsp/bitreader.h +++ b/aom_dsp/bitreader.h @@ -15,10 +15,10 @@ #include #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_ports/mem.h" #include "aom/vp8dx.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #include "aom_dsp/prob.h" #ifdef __cplusplus @@ -41,19 +41,19 @@ typedef struct { int count; const uint8_t *buffer_end; const uint8_t *buffer; - vpx_decrypt_cb decrypt_cb; + aom_decrypt_cb decrypt_cb; void *decrypt_state; uint8_t clear_buffer[sizeof(BD_VALUE) + 1]; -} vpx_reader; +} aom_reader; -int vpx_reader_init(vpx_reader *r, const uint8_t *buffer, size_t size, - vpx_decrypt_cb decrypt_cb, void *decrypt_state); +int aom_reader_init(aom_reader *r, const uint8_t *buffer, size_t size, + aom_decrypt_cb decrypt_cb, void *decrypt_state); -void vpx_reader_fill(vpx_reader *r); +void aom_reader_fill(aom_reader *r); -const uint8_t *vpx_reader_find_end(vpx_reader *r); +const uint8_t *aom_reader_find_end(aom_reader *r); -static INLINE int vpx_reader_has_error(vpx_reader *r) { +static INLINE int aom_reader_has_error(aom_reader *r) { // Check if we have reached the end of the buffer. 
// // Variable 'count' stores the number of bits in the 'value' buffer, minus @@ -71,7 +71,7 @@ static INLINE int vpx_reader_has_error(vpx_reader *r) { return r->count > BD_VALUE_SIZE && r->count < LOTS_OF_BITS; } -static INLINE int vpx_read(vpx_reader *r, int prob) { +static INLINE int aom_read(aom_reader *r, int prob) { unsigned int bit = 0; BD_VALUE value; BD_VALUE bigsplit; @@ -79,7 +79,7 @@ static INLINE int vpx_read(vpx_reader *r, int prob) { unsigned int range; unsigned int split = (r->range * prob + (256 - prob)) >> CHAR_BIT; - if (r->count < 0) vpx_reader_fill(r); + if (r->count < 0) aom_reader_fill(r); value = r->value; count = r->count; @@ -95,7 +95,7 @@ static INLINE int vpx_read(vpx_reader *r, int prob) { } { - register int shift = vpx_norm[range]; + register int shift = aom_norm[range]; range <<= shift; value <<= shift; count -= shift; @@ -107,23 +107,23 @@ static INLINE int vpx_read(vpx_reader *r, int prob) { return bit; } -static INLINE int vpx_read_bit(vpx_reader *r) { - return vpx_read(r, 128); // vpx_prob_half +static INLINE int aom_read_bit(aom_reader *r) { + return aom_read(r, 128); // aom_prob_half } -static INLINE int vpx_read_literal(vpx_reader *r, int bits) { +static INLINE int aom_read_literal(aom_reader *r, int bits) { int literal = 0, bit; - for (bit = bits - 1; bit >= 0; bit--) literal |= vpx_read_bit(r) << bit; + for (bit = bits - 1; bit >= 0; bit--) literal |= aom_read_bit(r) << bit; return literal; } -static INLINE int vpx_read_tree(vpx_reader *r, const vpx_tree_index *tree, - const vpx_prob *probs) { - vpx_tree_index i = 0; +static INLINE int aom_read_tree(aom_reader *r, const aom_tree_index *tree, + const aom_prob *probs) { + aom_tree_index i = 0; - while ((i = tree[i + vpx_read(r, probs[i >> 1])]) > 0) continue; + while ((i = tree[i + aom_read(r, probs[i >> 1])]) > 0) continue; return -i; } diff --git a/aom_dsp/bitreader_buffer.c b/aom_dsp/bitreader_buffer.c index 59a5a57c1614c548d0259e7ed405fe629de2cbdc..aafa3bde79006e3c64e784f322c30342db42550f 100644 --- a/aom_dsp/bitreader_buffer.c +++ b/aom_dsp/bitreader_buffer.c @@ -8,14 +8,14 @@ * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_config.h" +#include "./aom_config.h" #include "./bitreader_buffer.h" -size_t vpx_rb_bytes_read(struct vpx_read_bit_buffer *rb) { +size_t aom_rb_bytes_read(struct aom_read_bit_buffer *rb) { return (rb->bit_offset + 7) >> 3; } -int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb) { +int aom_rb_read_bit(struct aom_read_bit_buffer *rb) { const size_t off = rb->bit_offset; const size_t p = off >> 3; const int q = 7 - (int)(off & 0x7); @@ -29,23 +29,23 @@ int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb) { } } -int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits) { +int aom_rb_read_literal(struct aom_read_bit_buffer *rb, int bits) { int value = 0, bit; - for (bit = bits - 1; bit >= 0; bit--) value |= vpx_rb_read_bit(rb) << bit; + for (bit = bits - 1; bit >= 0; bit--) value |= aom_rb_read_bit(rb) << bit; return value; } -int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits) { - const int value = vpx_rb_read_literal(rb, bits); - return vpx_rb_read_bit(rb) ? -value : value; +int aom_rb_read_signed_literal(struct aom_read_bit_buffer *rb, int bits) { + const int value = aom_rb_read_literal(rb, bits); + return aom_rb_read_bit(rb) ? 
-value : value; } -int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits) { +int aom_rb_read_inv_signed_literal(struct aom_read_bit_buffer *rb, int bits) { #if CONFIG_MISC_FIXES const int nbits = sizeof(unsigned) * 8 - bits - 1; - const unsigned value = (unsigned)vpx_rb_read_literal(rb, bits + 1) << nbits; + const unsigned value = (unsigned)aom_rb_read_literal(rb, bits + 1) << nbits; return ((int)value) >> nbits; #else - return vpx_rb_read_signed_literal(rb, bits); + return aom_rb_read_signed_literal(rb, bits); #endif } diff --git a/aom_dsp/bitreader_buffer.h b/aom_dsp/bitreader_buffer.h index e2558fc9dc29955b059b066b836c891f75a015d6..5636f63f63b58e668684c4ef315766f79144b2df 100644 --- a/aom_dsp/bitreader_buffer.h +++ b/aom_dsp/bitreader_buffer.h @@ -14,32 +14,32 @@ #include -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #ifdef __cplusplus extern "C" { #endif -typedef void (*vpx_rb_error_handler)(void *data); +typedef void (*aom_rb_error_handler)(void *data); -struct vpx_read_bit_buffer { +struct aom_read_bit_buffer { const uint8_t *bit_buffer; const uint8_t *bit_buffer_end; size_t bit_offset; void *error_handler_data; - vpx_rb_error_handler error_handler; + aom_rb_error_handler error_handler; }; -size_t vpx_rb_bytes_read(struct vpx_read_bit_buffer *rb); +size_t aom_rb_bytes_read(struct aom_read_bit_buffer *rb); -int vpx_rb_read_bit(struct vpx_read_bit_buffer *rb); +int aom_rb_read_bit(struct aom_read_bit_buffer *rb); -int vpx_rb_read_literal(struct vpx_read_bit_buffer *rb, int bits); +int aom_rb_read_literal(struct aom_read_bit_buffer *rb, int bits); -int vpx_rb_read_signed_literal(struct vpx_read_bit_buffer *rb, int bits); +int aom_rb_read_signed_literal(struct aom_read_bit_buffer *rb, int bits); -int vpx_rb_read_inv_signed_literal(struct vpx_read_bit_buffer *rb, int bits); +int aom_rb_read_inv_signed_literal(struct aom_read_bit_buffer *rb, int bits); #ifdef __cplusplus } // extern "C" diff --git a/aom_dsp/bitwriter.c b/aom_dsp/bitwriter.c index 62a1df8ec889cca8a41044abd1c27e260eb1df76..973b5e8226f171cf078c3b0d26705d083045a3f3 100644 --- a/aom_dsp/bitwriter.c +++ b/aom_dsp/bitwriter.c @@ -13,19 +13,19 @@ #include "./bitwriter.h" -void vpx_start_encode(vpx_writer *br, uint8_t *source) { +void aom_start_encode(aom_writer *br, uint8_t *source) { br->lowvalue = 0; br->range = 255; br->count = -24; br->buffer = source; br->pos = 0; - vpx_write_bit(br, 0); + aom_write_bit(br, 0); } -void vpx_stop_encode(vpx_writer *br) { +void aom_stop_encode(aom_writer *br) { int i; - for (i = 0; i < 32; i++) vpx_write_bit(br, 0); + for (i = 0; i < 32; i++) aom_write_bit(br, 0); // Ensure there's no ambigous collision with any index marker bytes if ((br->buffer[br->pos - 1] & 0xe0) == 0xc0) br->buffer[br->pos++] = 0; diff --git a/aom_dsp/bitwriter.h b/aom_dsp/bitwriter.h index 2ea1800fc44f9e7e93dc81f0f73dcc9a5b8f5162..f731a6595268815f02a4b4ed4ae175bd2e4b7085 100644 --- a/aom_dsp/bitwriter.h +++ b/aom_dsp/bitwriter.h @@ -20,18 +20,18 @@ extern "C" { #endif -typedef struct vpx_writer { +typedef struct aom_writer { unsigned int lowvalue; unsigned int range; int count; unsigned int pos; uint8_t *buffer; -} vpx_writer; +} aom_writer; -void vpx_start_encode(vpx_writer *bc, uint8_t *buffer); -void vpx_stop_encode(vpx_writer *bc); +void aom_start_encode(aom_writer *bc, uint8_t *buffer); +void aom_stop_encode(aom_writer *bc); -static INLINE void vpx_write(vpx_writer *br, int bit, int probability) { +static INLINE void aom_write(aom_writer *br, int bit, int probability) { unsigned int 
split; int count = br->count; unsigned int range = br->range; @@ -47,7 +47,7 @@ static INLINE void vpx_write(vpx_writer *br, int bit, int probability) { range = br->range - split; } - shift = vpx_norm[range]; + shift = aom_norm[range]; range <<= shift; count += shift; @@ -79,17 +79,17 @@ static INLINE void vpx_write(vpx_writer *br, int bit, int probability) { br->range = range; } -static INLINE void vpx_write_bit(vpx_writer *w, int bit) { - vpx_write(w, bit, 128); // vpx_prob_half +static INLINE void aom_write_bit(aom_writer *w, int bit) { + aom_write(w, bit, 128); // aom_prob_half } -static INLINE void vpx_write_literal(vpx_writer *w, int data, int bits) { +static INLINE void aom_write_literal(aom_writer *w, int data, int bits) { int bit; - for (bit = bits - 1; bit >= 0; bit--) vpx_write_bit(w, 1 & (data >> bit)); + for (bit = bits - 1; bit >= 0; bit--) aom_write_bit(w, 1 & (data >> bit)); } -#define vpx_write_prob(w, v) vpx_write_literal((w), (v), 8) +#define aom_write_prob(w, v) aom_write_literal((w), (v), 8) #ifdef __cplusplus } // extern "C" diff --git a/aom_dsp/bitwriter_buffer.c b/aom_dsp/bitwriter_buffer.c index 6e30e14ce62a63d5e5eb154d1a3b8f673f579a52..5f6b324c1ceaa67aeaa2716b8b01745a5aa450fe 100644 --- a/aom_dsp/bitwriter_buffer.c +++ b/aom_dsp/bitwriter_buffer.c @@ -12,14 +12,14 @@ #include #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "./bitwriter_buffer.h" -size_t vpx_wb_bytes_written(const struct vpx_write_bit_buffer *wb) { +size_t aom_wb_bytes_written(const struct aom_write_bit_buffer *wb) { return wb->bit_offset / CHAR_BIT + (wb->bit_offset % CHAR_BIT > 0); } -void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) { +void aom_wb_write_bit(struct aom_write_bit_buffer *wb, int bit) { const int off = (int)wb->bit_offset; const int p = off / CHAR_BIT; const int q = CHAR_BIT - 1 - off % CHAR_BIT; @@ -32,17 +32,17 @@ void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit) { wb->bit_offset = off + 1; } -void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits) { +void aom_wb_write_literal(struct aom_write_bit_buffer *wb, int data, int bits) { int bit; - for (bit = bits - 1; bit >= 0; bit--) vpx_wb_write_bit(wb, (data >> bit) & 1); + for (bit = bits - 1; bit >= 0; bit--) aom_wb_write_bit(wb, (data >> bit) & 1); } -void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data, +void aom_wb_write_inv_signed_literal(struct aom_write_bit_buffer *wb, int data, int bits) { #if CONFIG_MISC_FIXES - vpx_wb_write_literal(wb, data, bits + 1); + aom_wb_write_literal(wb, data, bits + 1); #else - vpx_wb_write_literal(wb, abs(data), bits); - vpx_wb_write_bit(wb, data < 0); + aom_wb_write_literal(wb, abs(data), bits); + aom_wb_write_bit(wb, data < 0); #endif } diff --git a/aom_dsp/bitwriter_buffer.h b/aom_dsp/bitwriter_buffer.h index 2e2b9b0d807a53117734857dc913978692919c63..9b33fe8443e4e0a59adfcaa4f57db809a9ab0e7f 100644 --- a/aom_dsp/bitwriter_buffer.h +++ b/aom_dsp/bitwriter_buffer.h @@ -12,24 +12,24 @@ #ifndef VPX_DSP_BITWRITER_BUFFER_H_ #define VPX_DSP_BITWRITER_BUFFER_H_ -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #ifdef __cplusplus extern "C" { #endif -struct vpx_write_bit_buffer { +struct aom_write_bit_buffer { uint8_t *bit_buffer; size_t bit_offset; }; -size_t vpx_wb_bytes_written(const struct vpx_write_bit_buffer *wb); +size_t aom_wb_bytes_written(const struct aom_write_bit_buffer *wb); -void vpx_wb_write_bit(struct vpx_write_bit_buffer *wb, int bit); +void aom_wb_write_bit(struct 
aom_write_bit_buffer *wb, int bit); -void vpx_wb_write_literal(struct vpx_write_bit_buffer *wb, int data, int bits); +void aom_wb_write_literal(struct aom_write_bit_buffer *wb, int data, int bits); -void vpx_wb_write_inv_signed_literal(struct vpx_write_bit_buffer *wb, int data, +void aom_wb_write_inv_signed_literal(struct aom_write_bit_buffer *wb, int data, int bits); #ifdef __cplusplus diff --git a/aom_dsp/fastssim.c b/aom_dsp/fastssim.c index a0ae6a20a9c7aa9d57325e1e6b50441f1b3796e6..b64b50b481ad5381cbcd8a02ffbd8ce632e5906a 100644 --- a/aom_dsp/fastssim.c +++ b/aom_dsp/fastssim.c @@ -14,8 +14,8 @@ #include #include #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/ssim.h" #include "aom_ports/system_state.h" /* TODO(jbb): High bit depth version of this code needed */ @@ -428,11 +428,11 @@ static double convert_ssim_db(double _ssim, double _weight) { return 10 * (log10(_weight) - log10(_weight - _ssim)); } -double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source, +double aom_calc_fastssim(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v) { double ssimv; - vpx_clear_system_state(); + aom_clear_system_state(); *ssim_y = calc_ssim(source->y_buffer, source->y_stride, dest->y_buffer, diff --git a/aom_dsp/fwd_txfm.c b/aom_dsp/fwd_txfm.c index 4cf8a9f1310fa3705e47cb9f66ec2d314c3ab8d0..68409f9a78a78a0532caafd11a8c89d07bf09666 100644 --- a/aom_dsp/fwd_txfm.c +++ b/aom_dsp/fwd_txfm.c @@ -11,7 +11,7 @@ #include "aom_dsp/fwd_txfm.h" -void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { // The 2D transform is done with two passes which are actually pretty // similar. In the first one, we transform the columns and transpose // the results. In the second one, we transform the rows. To achieve that, @@ -77,7 +77,7 @@ void vpx_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { } } -void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; tran_low_t sum = 0; for (r = 0; r < 4; ++r) @@ -87,7 +87,7 @@ void vpx_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) { output[1] = 0; } -void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { +void aom_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { int i, j; tran_low_t intermediate[64]; int pass; @@ -172,7 +172,7 @@ void vpx_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { } } -void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; tran_low_t sum = 0; for (r = 0; r < 8; ++r) @@ -182,7 +182,7 @@ void vpx_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) { output[1] = 0; } -void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { // The 2D transform is done with two passes which are actually pretty // similar. In the first one, we transform the columns and transpose // the results. In the second one, we transform the rows. 
To achieve that, @@ -362,7 +362,7 @@ void vpx_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { } } -void vpx_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; tran_low_t sum = 0; for (r = 0; r < 16; ++r) @@ -385,7 +385,7 @@ static INLINE tran_high_t half_round_shift(tran_high_t input) { return rv; } -void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) { +void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round) { tran_high_t step[32]; // Stage 1 step[0] = input[0] + input[(32 - 1)]; @@ -708,7 +708,7 @@ void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round) { output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64); } -void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { +void aom_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { int i, j; tran_high_t output[32 * 32]; @@ -716,7 +716,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4; - vpx_fdct32(temp_in, temp_out, 0); + aom_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; } @@ -725,7 +725,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; - vpx_fdct32(temp_in, temp_out, 0); + aom_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2); @@ -735,7 +735,7 @@ void vpx_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { // Note that although we use dct_32_round in dct32 computation flow, // this 2d fdct32x32 for rate-distortion optimization loop is operating // within 16 bits precision. 
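Both 32x32 forward transforms here use the same intermediate rounding step after the column pass: (t + 1 + (t > 0)) >> 2. A small self-contained check (illustrative, not part of the patch) that this is simply a divide-by-4 that rounds halves away from zero, assuming the usual arithmetic right shift for negative operands:

    #include <assert.h>
    #include <stdio.h>

    /* Rounding applied between the column and row passes of aom_fdct32x32. */
    static int round_div4_sym(int t) { return (t + 1 + (t > 0)) >> 2; }

    int main(void) {
      for (int t = -1024; t <= 1024; ++t) {
        /* Reference: t / 4, rounding halves away from zero. */
        const int ref = (t >= 0) ? (t + 2) / 4 : -((-t + 2) / 4);
        assert(round_div4_sym(t) == ref);
      }
      printf("symmetric divide-by-4 rounding verified on [-1024, 1024]\n");
      return 0;
    }

Rounding away from zero treats positive and negative coefficients symmetrically; the _rd_ variant that follows instead rounds inside aom_fdct32 (the round argument) so, as the comment above notes, the rate-distortion loop stays within 16-bit intermediates.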
-void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { +void aom_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { int i, j; tran_high_t output[32 * 32]; @@ -743,11 +743,11 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4; - vpx_fdct32(temp_in, temp_out, 0); + aom_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) // TODO(cd): see quality impact of only doing // output[j * 32 + i] = (temp_out[j] + 1) >> 2; - // PS: also change code in vpx_dsp/x86/vpx_dct_sse2.c + // PS: also change code in aom_dsp/x86/aom_dct_sse2.c output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2; } @@ -755,12 +755,12 @@ void vpx_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32]; - vpx_fdct32(temp_in, temp_out, 1); + aom_fdct32(temp_in, temp_out, 1); for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j]; } } -void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) { int r, c; tran_low_t sum = 0; for (r = 0; r < 32; ++r) @@ -771,42 +771,42 @@ void vpx_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) { } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output, +void aom_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) { - vpx_fdct4x4_c(input, output, stride); + aom_fdct4x4_c(input, output, stride); } -void vpx_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output, +void aom_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output, int stride) { - vpx_fdct8x8_c(input, final_output, stride); + aom_fdct8x8_c(input, final_output, stride); } -void vpx_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output, +void aom_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output, int stride) { - vpx_fdct8x8_1_c(input, final_output, stride); + aom_fdct8x8_1_c(input, final_output, stride); } -void vpx_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output, +void aom_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) { - vpx_fdct16x16_c(input, output, stride); + aom_fdct16x16_c(input, output, stride); } -void vpx_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output, +void aom_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) { - vpx_fdct16x16_1_c(input, output, stride); + aom_fdct16x16_1_c(input, output, stride); } -void vpx_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { - vpx_fdct32x32_c(input, out, stride); +void aom_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) { + aom_fdct32x32_c(input, out, stride); } -void vpx_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, +void aom_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) { - vpx_fdct32x32_rd_c(input, out, stride); + aom_fdct32x32_rd_c(input, out, stride); } -void vpx_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out, +void aom_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out, int stride) { - vpx_fdct32x32_1_c(input, out, stride); + aom_fdct32x32_1_c(input, out, stride); } #endif // CONFIG_VPX_HIGHBITDEPTH diff --git a/aom_dsp/fwd_txfm.h 
b/aom_dsp/fwd_txfm.h index 2ac17afd6c2df37e12067802b09a323b6666b8c7..97df577b8068a31d97cb46f51e299f2b580aae3e 100644 --- a/aom_dsp/fwd_txfm.h +++ b/aom_dsp/fwd_txfm.h @@ -22,5 +22,5 @@ static INLINE tran_high_t fdct_round_shift(tran_high_t input) { return rv; } -void vpx_fdct32(const tran_high_t *input, tran_high_t *output, int round); +void aom_fdct32(const tran_high_t *input, tran_high_t *output, int round); #endif // VPX_DSP_FWD_TXFM_H_ diff --git a/aom_dsp/intrapred.c b/aom_dsp/intrapred.c index 80999d817bc221e2e21031baca2e87fa01f95c85..e75faa61c1fa39c1ca285c7deaeced500ba726b3 100644 --- a/aom_dsp/intrapred.c +++ b/aom_dsp/intrapred.c @@ -9,11 +9,11 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" -#include "aom_dsp/vpx_dsp_common.h" -#include "aom_mem/vpx_mem.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_mem/aom_mem.h" #define DST(x, y) dst[(x) + (y)*stride] #define AVG3(a, b, c) (((a) + 2 * (b) + (c) + 2) >> 2) @@ -285,7 +285,7 @@ static INLINE void dc_predictor(uint8_t *dst, ptrdiff_t stride, int bs, } } -void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int H = above[-1]; const int I = left[0]; @@ -299,7 +299,7 @@ void vpx_he_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, memset(dst + stride * 3, AVG3(K, L, L), 4); } -void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int H = above[-1]; const int I = above[0]; @@ -317,7 +317,7 @@ void vpx_ve_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, memcpy(dst + stride * 3, dst, 4); } -void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int I = left[0]; const int J = left[1]; @@ -333,7 +333,7 @@ void vpx_d207_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 2) = DST(2, 2) = DST(0, 3) = DST(1, 3) = DST(2, 3) = DST(3, 3) = L; } -void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int A = above[0]; const int B = above[1]; @@ -356,7 +356,7 @@ void vpx_d63_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 3) = AVG3(E, F, G); // differs from vp8 } -void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int A = above[0]; const int B = above[1]; @@ -380,7 +380,7 @@ void vpx_d63f_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 3) = AVG3(F, G, H); } -void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int A = above[0]; const int B = above[1]; @@ -401,7 +401,7 @@ void vpx_d45_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 3) = H; // differs from vp8 } -void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int A = above[0]; const int B = above[1]; @@ -422,7 +422,7 @@ void vpx_d45e_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 3) = 
AVG3(G, H, H); } -void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int I = left[0]; const int J = left[1]; @@ -445,7 +445,7 @@ void vpx_d117_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 1) = AVG3(B, C, D); } -void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int I = left[0]; const int J = left[1]; @@ -466,7 +466,7 @@ void vpx_d135_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, DST(3, 0) = AVG3(D, C, B); } -void vpx_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, +void aom_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { const int I = left[0]; const int J = left[1]; @@ -682,7 +682,7 @@ static INLINE void highbd_h_predictor(uint16_t *dst, ptrdiff_t stride, int bs, (void)above; (void)bd; for (r = 0; r < bs; r++) { - vpx_memset16(dst, left[r], bs); + aom_memset16(dst, left[r], bs); dst += stride; } } @@ -709,7 +709,7 @@ static INLINE void highbd_dc_128_predictor(uint16_t *dst, ptrdiff_t stride, (void)left; for (r = 0; r < bs; r++) { - vpx_memset16(dst, 128 << (bd - 8), bs); + aom_memset16(dst, 128 << (bd - 8), bs); dst += stride; } } @@ -725,7 +725,7 @@ static INLINE void highbd_dc_left_predictor(uint16_t *dst, ptrdiff_t stride, expected_dc = (sum + (bs >> 1)) / bs; for (r = 0; r < bs; r++) { - vpx_memset16(dst, expected_dc, bs); + aom_memset16(dst, expected_dc, bs); dst += stride; } } @@ -741,7 +741,7 @@ static INLINE void highbd_dc_top_predictor(uint16_t *dst, ptrdiff_t stride, expected_dc = (sum + (bs >> 1)) / bs; for (r = 0; r < bs; r++) { - vpx_memset16(dst, expected_dc, bs); + aom_memset16(dst, expected_dc, bs); dst += stride; } } @@ -761,7 +761,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs, expected_dc = (sum + (count >> 1)) / count; for (r = 0; r < bs; r++) { - vpx_memset16(dst, expected_dc, bs); + aom_memset16(dst, expected_dc, bs); dst += stride; } } @@ -771,7 +771,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs, // can be unified and accessed as a pointer array. Note that the boundary // above and left are not necessarily used all the time. 
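The intra_pred_sized macro below is what keeps the renamed per-size entry points mechanical: each predictor has one sized implementation, and the macro stamps out the thin aom_<mode>_predictor_<N>x<N>_c wrappers around it. As a rough illustration (not an addition to the patch), intra_pred_sized(dc, 8) expands to:

    void aom_dc_predictor_8x8_c(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
      dc_predictor(dst, stride, 8, above, left);
    }

where dc_predictor is the static sized helper defined earlier in aom_dsp/intrapred.c, and uint8_t/ptrdiff_t come from the headers the file already includes. The highbitdepth variant of the macro does the same for the aom_highbd_* names, with uint16_t pixels and an extra bit-depth argument.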
#define intra_pred_sized(type, size) \ - void vpx_##type##_predictor_##size##x##size##_c( \ + void aom_##type##_predictor_##size##x##size##_c( \ uint8_t *dst, ptrdiff_t stride, const uint8_t *above, \ const uint8_t *left) { \ type##_predictor(dst, stride, size, above, left); \ @@ -779,7 +779,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs, #if CONFIG_VPX_HIGHBITDEPTH #define intra_pred_highbd_sized(type, size) \ - void vpx_highbd_##type##_predictor_##size##x##size##_c( \ + void aom_highbd_##type##_predictor_##size##x##size##_c( \ uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \ const uint16_t *left, int bd) { \ highbd_##type##_predictor(dst, stride, size, above, left, bd); \ diff --git a/aom_dsp/inv_txfm.c b/aom_dsp/inv_txfm.c index 1b01de1e843f085aefc327598e41ffd23b6ce88c..0e40949b13fa015132dfb26e0726450cb86da2eb 100644 --- a/aom_dsp/inv_txfm.c +++ b/aom_dsp/inv_txfm.c @@ -14,7 +14,7 @@ #include "aom_dsp/inv_txfm.h" -void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds, 0.5 shifts per pixel. */ int i; @@ -66,7 +66,7 @@ void vpx_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { } } -void vpx_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) { +void aom_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest, int dest_stride) { int i; tran_high_t a1, e1; tran_low_t tmp[4]; @@ -112,7 +112,7 @@ void idct4_c(const tran_low_t *input, tran_low_t *output) { output[3] = WRAPLOW(step[0] - step[3], 8); } -void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[4 * 4]; tran_low_t *outptr = out; int i, j; @@ -136,7 +136,7 @@ void vpx_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) { } } -void vpx_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, +void aom_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest, int dest_stride) { int i; tran_high_t a1; @@ -207,7 +207,7 @@ void idct8_c(const tran_low_t *input, tran_low_t *output) { output[7] = WRAPLOW(step1[0] - step1[7], 8); } -void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[8 * 8]; tran_low_t *outptr = out; int i, j; @@ -231,7 +231,7 @@ void vpx_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) { } } -void vpx_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { int i, j; tran_high_t a1; tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); @@ -357,7 +357,7 @@ void iadst8_c(const tran_low_t *input, tran_low_t *output) { output[7] = WRAPLOW(-x1, 8); } -void vpx_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[8 * 8] = { 0 }; tran_low_t *outptr = out; int i, j; @@ -547,7 +547,7 @@ void idct16_c(const tran_low_t *input, tran_low_t *output) { output[15] = WRAPLOW(step2[0] - step2[15], 8); } -void vpx_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, +void aom_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[16 * 16]; tran_low_t 
*outptr = out; @@ -743,7 +743,7 @@ void iadst16_c(const tran_low_t *input, tran_low_t *output) { output[15] = WRAPLOW(-x1, 8); } -void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, +void aom_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[16 * 16] = { 0 }; tran_low_t *outptr = out; @@ -769,7 +769,7 @@ void vpx_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { int i, j; tran_high_t a1; tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8); @@ -1148,7 +1148,7 @@ void idct32_c(const tran_low_t *input, tran_low_t *output) { output[31] = WRAPLOW(step1[0] - step1[31], 8); } -void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, +void aom_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[32 * 32]; tran_low_t *outptr = out; @@ -1185,7 +1185,7 @@ void vpx_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, +void aom_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[32 * 32] = { 0 }; tran_low_t *outptr = out; @@ -1211,7 +1211,7 @@ void vpx_idct32x32_135_add_c(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, +void aom_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, int stride) { tran_low_t out[32 * 32] = { 0 }; tran_low_t *outptr = out; @@ -1237,7 +1237,7 @@ void vpx_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { +void aom_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { int i, j; tran_high_t a1; @@ -1252,7 +1252,7 @@ void vpx_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) { } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds, 0.5 shifts per pixel. 
*/ @@ -1306,7 +1306,7 @@ void vpx_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8, +void aom_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8, int dest_stride, int bd) { int i; tran_high_t a1, e1; @@ -1339,7 +1339,7 @@ void vpx_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8, } } -void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) { +void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t step[4]; tran_high_t temp1, temp2; (void)bd; @@ -1360,7 +1360,7 @@ void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) { output[3] = WRAPLOW(step[0] - step[3], bd); } -void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[4 * 4]; tran_low_t *outptr = out; @@ -1370,7 +1370,7 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, // Rows for (i = 0; i < 4; ++i) { - vpx_highbd_idct4_c(input, outptr, bd); + aom_highbd_idct4_c(input, outptr, bd); input += 4; outptr += 4; } @@ -1378,7 +1378,7 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, // Columns for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; - vpx_highbd_idct4_c(temp_in, temp_out, bd); + aom_highbd_idct4_c(temp_in, temp_out, bd); for (j = 0; j < 4; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd); @@ -1386,7 +1386,7 @@ void vpx_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8, int dest_stride, int bd) { int i; tran_high_t a1; @@ -1406,7 +1406,7 @@ void vpx_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) { +void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t step1[8], step2[8]; tran_high_t temp1, temp2; // stage 1 @@ -1424,7 +1424,7 @@ void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) { step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd); // stage 2 & stage 3 - even half - vpx_highbd_idct4_c(step1, step1, bd); + aom_highbd_idct4_c(step1, step1, bd); // stage 2 - odd half step2[4] = WRAPLOW(step1[4] + step1[5], bd); @@ -1451,7 +1451,7 @@ void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) { output[7] = WRAPLOW(step1[0] - step1[7], bd); } -void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[8 * 8]; tran_low_t *outptr = out; @@ -1461,7 +1461,7 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, // First transform rows. for (i = 0; i < 8; ++i) { - vpx_highbd_idct8_c(input, outptr, bd); + aom_highbd_idct8_c(input, outptr, bd); input += 8; outptr += 8; } @@ -1469,7 +1469,7 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, // Then transform columns. 
for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - vpx_highbd_idct8_c(temp_in, temp_out, bd); + aom_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); @@ -1477,7 +1477,7 @@ void vpx_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { int i, j; tran_high_t a1; @@ -1492,7 +1492,7 @@ void vpx_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { +void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; tran_low_t x0 = input[0]; @@ -1530,7 +1530,7 @@ void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) { output[3] = WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3, bd), bd); } -void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { +void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_high_t s0, s1, s2, s3, s4, s5, s6, s7; tran_low_t x0 = input[7]; @@ -1607,7 +1607,7 @@ void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) { output[7] = WRAPLOW(-x1, bd); } -void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[8 * 8] = { 0 }; tran_low_t *outptr = out; @@ -1618,14 +1618,14 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, // First transform rows. // Only first 4 row has non-zero coefs. for (i = 0; i < 4; ++i) { - vpx_highbd_idct8_c(input, outptr, bd); + aom_highbd_idct8_c(input, outptr, bd); input += 8; outptr += 8; } // Then transform columns. for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - vpx_highbd_idct8_c(temp_in, temp_out, bd); + aom_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); @@ -1633,7 +1633,7 @@ void vpx_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { +void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_low_t step1[16], step2[16]; tran_high_t temp1, temp2; (void)bd; @@ -1799,7 +1799,7 @@ void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) { output[15] = WRAPLOW(step2[0] - step2[15], bd); } -void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[16 * 16]; tran_low_t *outptr = out; @@ -1809,7 +1809,7 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, // First transform rows. for (i = 0; i < 16; ++i) { - vpx_highbd_idct16_c(input, outptr, bd); + aom_highbd_idct16_c(input, outptr, bd); input += 16; outptr += 16; } @@ -1817,7 +1817,7 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, // Then transform columns. 
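The highbitdepth inverse-transform wrappers in these hunks all share the same separable control flow: run the 1-D transform across the rows into a scratch block, run it again down the columns, then round, clip to the bit depth, and add into the destination. A compact scalar sketch of that flow for a 4x4 block, with the 1-D transform abstracted as a callback (names here are illustrative, not functions added by this patch):

    #include <stdint.h>

    typedef void (*tx1d_fn)(const int32_t *in, int32_t *out, int bd);

    static uint16_t clip_pixel_bd(int32_t v, int bd) {
      const int32_t hi = (1 << bd) - 1;
      return (uint16_t)(v < 0 ? 0 : (v > hi ? hi : v));
    }

    void inv_txfm_4x4_add_sketch(const int32_t *input, uint16_t *dest,
                                 int stride, int bd, tx1d_fn tx) {
      int32_t out[4 * 4], tmp_in[4], tmp_out[4];
      /* Rows. */
      for (int i = 0; i < 4; ++i) tx(input + 4 * i, out + 4 * i, bd);
      /* Columns, then round by >> 4, clip to [0, 2^bd - 1], and add. */
      for (int i = 0; i < 4; ++i) {
        for (int j = 0; j < 4; ++j) tmp_in[j] = out[j * 4 + i];
        tx(tmp_in, tmp_out, bd);
        for (int j = 0; j < 4; ++j)
          dest[j * stride + i] = clip_pixel_bd(
              dest[j * stride + i] + ((tmp_out[j] + 8) >> 4), bd);
      }
    }

The larger block sizes only change the scratch dimensions and the final rounding shift, and the reduced-coefficient variants (for example the _10_add paths) transform only the first few rows, since the remaining coefficients are known to be zero.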
for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; - vpx_highbd_idct16_c(temp_in, temp_out, bd); + aom_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); @@ -1825,7 +1825,7 @@ void vpx_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { +void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8; tran_high_t s9, s10, s11, s12, s13, s14, s15; @@ -1995,7 +1995,7 @@ void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd) { output[15] = WRAPLOW(-x1, bd); } -void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[16 * 16] = { 0 }; tran_low_t *outptr = out; @@ -2006,7 +2006,7 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, // First transform rows. Since all non-zero dct coefficients are in // upper-left 4x4 area, we only need to calculate first 4 rows here. for (i = 0; i < 4; ++i) { - vpx_highbd_idct16_c(input, outptr, bd); + aom_highbd_idct16_c(input, outptr, bd); input += 16; outptr += 16; } @@ -2014,7 +2014,7 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, // Then transform columns. for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; - vpx_highbd_idct16_c(temp_in, temp_out, bd); + aom_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); @@ -2022,7 +2022,7 @@ void vpx_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { int i, j; tran_high_t a1; @@ -2407,7 +2407,7 @@ static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output, output[31] = WRAPLOW(step1[0] - step1[31], bd); } -void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[32 * 32]; tran_low_t *outptr = out; @@ -2445,7 +2445,7 @@ void vpx_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[32 * 32] = { 0 }; tran_low_t *outptr = out; @@ -2471,7 +2471,7 @@ void vpx_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { int i, j; int a1; diff --git a/aom_dsp/inv_txfm.h b/aom_dsp/inv_txfm.h index 7723f01fd4580949acb64fc44d4f2f27f2301afe..c071670ed7b5f18ed252fc97c278aa56c2a160bc 100644 --- a/aom_dsp/inv_txfm.h +++ b/aom_dsp/inv_txfm.h @@ -14,7 +14,7 @@ #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/txfm_common.h" #include "aom_ports/mem.h" @@ -97,13 +97,13 @@ void iadst8_c(const tran_low_t 
*input, tran_low_t *output); void iadst16_c(const tran_low_t *input, tran_low_t *output); #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd); -void vpx_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd); -void vpx_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd); +void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd); +void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd); +void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd); -void vpx_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd); -void vpx_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd); -void vpx_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd); +void aom_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd); +void aom_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd); +void aom_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd); static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans, int bd) { diff --git a/aom_dsp/loopfilter.c b/aom_dsp/loopfilter.c index 39d522aa245c04beb382580e858cc4c54ccdc636..da9ea918e489f15b8ea3fb14843673a04d636637 100644 --- a/aom_dsp/loopfilter.c +++ b/aom_dsp/loopfilter.c @@ -11,8 +11,8 @@ #include -#include "./vpx_config.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "./aom_config.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" static INLINE int8_t signed_char_clamp(int t) { @@ -109,7 +109,7 @@ static INLINE void filter4(int8_t mask, uint8_t thresh, uint8_t *op1, *op1 = signed_char_clamp(ps1 + filter) ^ 0x80; } -void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */, +void aom_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -126,15 +126,15 @@ void vpx_lpf_horizontal_4_c(uint8_t *s, int p /* pitch */, } } -void vpx_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_horizontal_4_dual_c(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1); + aom_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, +void aom_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -151,12 +151,12 @@ void vpx_lpf_vertical_4_c(uint8_t *s, int pitch, const uint8_t *blimit, } } -void vpx_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, +void aom_lpf_vertical_4_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1); + aom_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1); + aom_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1); } static INLINE void filter8(int8_t mask, uint8_t thresh, uint8_t flat, @@ -179,7 +179,7 @@ static INLINE void filter8(int8_t 
mask, uint8_t thresh, uint8_t flat, } } -void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit, +void aom_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -199,15 +199,15 @@ void vpx_lpf_horizontal_8_c(uint8_t *s, int p, const uint8_t *blimit, } } -void vpx_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_horizontal_8_dual_c(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1); + aom_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, +void aom_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -224,12 +224,12 @@ void vpx_lpf_vertical_8_c(uint8_t *s, int pitch, const uint8_t *blimit, } } -void vpx_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, +void aom_lpf_vertical_8_dual_c(uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1); + aom_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1); + aom_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1); } static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat, @@ -286,7 +286,7 @@ static INLINE void filter16(int8_t mask, uint8_t thresh, uint8_t flat, } } -void vpx_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit, +void aom_lpf_horizontal_16_c(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { int i; @@ -332,12 +332,12 @@ static void mb_lpf_vertical_edge_w(uint8_t *s, int p, const uint8_t *blimit, } } -void vpx_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit, +void aom_lpf_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8); } -void vpx_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit, +void aom_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16); } @@ -433,7 +433,7 @@ static INLINE void highbd_filter4(int8_t mask, uint8_t thresh, uint16_t *op1, *op1 = signed_char_clamp_high(ps1 + filter, bd) + (0x80 << shift); } -void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */, +void aom_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { int i; @@ -456,15 +456,15 @@ void vpx_highbd_lpf_horizontal_4_c(uint16_t *s, int p /* pitch */, } } -void vpx_highbd_lpf_horizontal_4_dual_c( +void aom_highbd_lpf_horizontal_4_dual_c( uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd) { - vpx_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1, bd); - 
vpx_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1, bd); + aom_highbd_lpf_horizontal_4_c(s, p, blimit0, limit0, thresh0, 1, bd); + aom_highbd_lpf_horizontal_4_c(s + 8, p, blimit1, limit1, thresh1, 1, bd); } -void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit, +void aom_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { int i; @@ -481,12 +481,12 @@ void vpx_highbd_lpf_vertical_4_c(uint16_t *s, int pitch, const uint8_t *blimit, } } -void vpx_highbd_lpf_vertical_4_dual_c( +void aom_highbd_lpf_vertical_4_dual_c( uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd) { - vpx_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1, + aom_highbd_lpf_vertical_4_c(s, pitch, blimit0, limit0, thresh0, 1, bd); + aom_highbd_lpf_vertical_4_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1, bd); } @@ -510,7 +510,7 @@ static INLINE void highbd_filter8(int8_t mask, uint8_t thresh, uint8_t flat, } } -void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit, +void aom_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { int i; @@ -531,15 +531,15 @@ void vpx_highbd_lpf_horizontal_8_c(uint16_t *s, int p, const uint8_t *blimit, } } -void vpx_highbd_lpf_horizontal_8_dual_c( +void aom_highbd_lpf_horizontal_8_dual_c( uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd) { - vpx_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1, bd); + aom_highbd_lpf_horizontal_8_c(s, p, blimit0, limit0, thresh0, 1, bd); + aom_highbd_lpf_horizontal_8_c(s + 8, p, blimit1, limit1, thresh1, 1, bd); } -void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit, +void aom_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { int i; @@ -557,12 +557,12 @@ void vpx_highbd_lpf_vertical_8_c(uint16_t *s, int pitch, const uint8_t *blimit, } } -void vpx_highbd_lpf_vertical_8_dual_c( +void aom_highbd_lpf_vertical_8_dual_c( uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd) { - vpx_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1, bd); - vpx_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1, + aom_highbd_lpf_vertical_8_c(s, pitch, blimit0, limit0, thresh0, 1, bd); + aom_highbd_lpf_vertical_8_c(s + 8 * pitch, pitch, blimit1, limit1, thresh1, 1, bd); } @@ -632,7 +632,7 @@ static INLINE void highbd_filter16(int8_t mask, uint8_t thresh, uint8_t flat, } } -void vpx_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit, +void aom_highbd_lpf_horizontal_16_c(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { int i; @@ -694,13 +694,13 @@ static void highbd_mb_lpf_vertical_edge_w(uint16_t *s, int p, } } -void vpx_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit, +void 
aom_highbd_lpf_vertical_16_c(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd) { highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 8, bd); } -void vpx_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p, +void aom_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd) { diff --git a/aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c similarity index 99% rename from aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c rename to aom_dsp/mips/aom_convolve8_avg_horiz_msa.c index 06c1af35889bf96149bfa65bd07daf92653401fa..847394a3d214708da9556fdbb8027b2fd9ad952b 100644 --- a/aom_dsp/mips/vpx_convolve8_avg_horiz_msa.c +++ b/aom_dsp/mips/aom_convolve8_avg_horiz_msa.c @@ -10,8 +10,8 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/mips/vpx_convolve_msa.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/mips/aom_convolve_msa.h" static void common_hz_8t_and_aver_dst_4x4_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, @@ -632,7 +632,7 @@ static void common_hz_2t_and_aver_dst_64w_msa(const uint8_t *src, } } -void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -669,7 +669,7 @@ void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, (int32_t)dst_stride, &filt_hor[3], h); break; default: - vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } @@ -696,7 +696,7 @@ void vpx_convolve8_avg_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, (int32_t)dst_stride, filt_hor, h); break; default: - vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/vpx_convolve8_avg_msa.c b/aom_dsp/mips/aom_convolve8_avg_msa.c similarity index 98% rename from aom_dsp/mips/vpx_convolve8_avg_msa.c rename to aom_dsp/mips/aom_convolve8_avg_msa.c index 7b828cf4c02e25e884991563c877d17ccf5b5950..bed600d5b9dd473f0f4a69ccb3f28895c06dd861 100644 --- a/aom_dsp/mips/vpx_convolve8_avg_msa.c +++ b/aom_dsp/mips/aom_convolve8_avg_msa.c @@ -10,8 +10,8 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/mips/vpx_convolve_msa.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/mips/aom_convolve_msa.h" static void common_hv_8ht_8vt_and_aver_dst_4w_msa( const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, @@ -515,7 +515,7 @@ static void common_hv_2ht_2vt_and_aver_dst_64w_msa( } } -void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -561,13 +561,13 @@ void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride, &filt_hor[3], &filt_ver[3], h); break; default: - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } } else if (((const int32_t *)filter_x)[0] == 0 || ((const int32_t 
*)filter_y)[0] == 0) { - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } else { switch (w) { @@ -597,7 +597,7 @@ void vpx_convolve8_avg_msa(const uint8_t *src, ptrdiff_t src_stride, filt_ver, h); break; default: - vpx_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/vpx_convolve8_avg_vert_msa.c b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c similarity index 99% rename from aom_dsp/mips/vpx_convolve8_avg_vert_msa.c rename to aom_dsp/mips/aom_convolve8_avg_vert_msa.c index cc68603cf1a679c52f5d9b26590553eac2a16531..dae771104f18cfd9b76c9939cfb0f7dde7cf19f5 100644 --- a/aom_dsp/mips/vpx_convolve8_avg_vert_msa.c +++ b/aom_dsp/mips/aom_convolve8_avg_vert_msa.c @@ -10,8 +10,8 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/mips/vpx_convolve_msa.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/mips/aom_convolve_msa.h" static void common_vt_8t_and_aver_dst_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, @@ -604,7 +604,7 @@ static void common_vt_2t_and_aver_dst_64w_msa(const uint8_t *src, } } -void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -641,7 +641,7 @@ void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride, (int32_t)dst_stride, &filt_ver[3], h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } @@ -669,7 +669,7 @@ void vpx_convolve8_avg_vert_msa(const uint8_t *src, ptrdiff_t src_stride, (int32_t)dst_stride, filt_ver, h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/vpx_convolve8_horiz_msa.c b/aom_dsp/mips/aom_convolve8_horiz_msa.c similarity index 98% rename from aom_dsp/mips/vpx_convolve8_horiz_msa.c rename to aom_dsp/mips/aom_convolve8_horiz_msa.c index 84a73a6f187d0058c2bcac27a218371cdc095575..fc3a823c5702e0f4eb6de831b942a04ed0f35fde 100644 --- a/aom_dsp/mips/vpx_convolve8_horiz_msa.c +++ b/aom_dsp/mips/aom_convolve8_horiz_msa.c @@ -10,8 +10,8 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/mips/vpx_convolve_msa.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/mips/aom_convolve_msa.h" static void common_hz_8t_4x4_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, @@ -620,7 +620,7 @@ static void common_hz_2t_64w_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -657,7 +657,7 @@ void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, &filt_hor[3], h); break; default: - vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, 
filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } @@ -684,7 +684,7 @@ void vpx_convolve8_horiz_msa(const uint8_t *src, ptrdiff_t src_stride, filt_hor, h); break; default: - vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/vpx_convolve8_msa.c b/aom_dsp/mips/aom_convolve8_msa.c similarity index 98% rename from aom_dsp/mips/vpx_convolve8_msa.c rename to aom_dsp/mips/aom_convolve8_msa.c index 673cfa9cb7b49df38a754e01d8f0d0125f6a906b..a4d594931a93989b32c63ee4b7d8c757c12149b2 100644 --- a/aom_dsp/mips/vpx_convolve8_msa.c +++ b/aom_dsp/mips/aom_convolve8_msa.c @@ -10,8 +10,8 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/mips/vpx_convolve_msa.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/mips/aom_convolve_msa.h" const uint8_t mc_filt_mask_arr[16 * 3] = { /* 8 width cases */ @@ -541,7 +541,7 @@ static void common_hv_2ht_2vt_64w_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int32_t x_step_q4, const int16_t *filter_y, int32_t y_step_q4, int32_t w, int32_t h) { @@ -586,13 +586,13 @@ void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, &filt_ver[3], (int32_t)h); break; default: - vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } } else if (((const int32_t *)filter_x)[0] == 0 || ((const int32_t *)filter_y)[0] == 0) { - vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } else { switch (w) { @@ -622,7 +622,7 @@ void vpx_convolve8_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, (int32_t)h); break; default: - vpx_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, + aom_convolve8_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/vpx_convolve8_vert_msa.c b/aom_dsp/mips/aom_convolve8_vert_msa.c similarity index 99% rename from aom_dsp/mips/vpx_convolve8_vert_msa.c rename to aom_dsp/mips/aom_convolve8_vert_msa.c index 4cf825764e86fb60193cb3116faa4a9dad9cc55b..f7bdfc2bd7b08e1cdec3397f92739a944ea47717 100644 --- a/aom_dsp/mips/vpx_convolve8_vert_msa.c +++ b/aom_dsp/mips/aom_convolve8_vert_msa.c @@ -10,8 +10,8 @@ */ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/mips/vpx_convolve_msa.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/mips/aom_convolve_msa.h" static void common_vt_8t_4w_msa(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, @@ -627,7 +627,7 @@ static void common_vt_2t_64w_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -664,7 +664,7 @@ void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride, &filt_ver[3], h); break; default: - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_vert_c(src, src_stride, 
dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } @@ -691,7 +691,7 @@ void vpx_convolve8_vert_msa(const uint8_t *src, ptrdiff_t src_stride, filt_ver, h); break; default: - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/vpx_convolve_avg_msa.c b/aom_dsp/mips/aom_convolve_avg_msa.c similarity index 99% rename from aom_dsp/mips/vpx_convolve_avg_msa.c rename to aom_dsp/mips/aom_convolve_avg_msa.c index 5ee791c4092455b806eb8afe09bdb145a51f72f8..75f8c7ea85aef3e82127e84fe794d3bc53d6989e 100644 --- a/aom_dsp/mips/vpx_convolve_avg_msa.c +++ b/aom_dsp/mips/aom_convolve_avg_msa.c @@ -187,7 +187,7 @@ static void avg_width64_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve_avg_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int32_t filter_x_stride, const int16_t *filter_y, int32_t filter_y_stride, diff --git a/aom_dsp/mips/vpx_convolve_copy_msa.c b/aom_dsp/mips/aom_convolve_copy_msa.c similarity index 99% rename from aom_dsp/mips/vpx_convolve_copy_msa.c rename to aom_dsp/mips/aom_convolve_copy_msa.c index ad4e4cde5eaa068ac05e50d8a3721390fd37ac3b..f7f116f4da6ce3bf1ecea626a5313cbe933aecb8 100644 --- a/aom_dsp/mips/vpx_convolve_copy_msa.c +++ b/aom_dsp/mips/aom_convolve_copy_msa.c @@ -197,7 +197,7 @@ static void copy_width64_msa(const uint8_t *src, int32_t src_stride, copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64); } -void vpx_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve_copy_msa(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int32_t filter_x_stride, const int16_t *filter_y, int32_t filter_y_stride, diff --git a/aom_dsp/mips/vpx_convolve_msa.h b/aom_dsp/mips/aom_convolve_msa.h similarity index 99% rename from aom_dsp/mips/vpx_convolve_msa.h rename to aom_dsp/mips/aom_convolve_msa.h index 90c169eaa60a01d90896b2197bf477f8fd513836..27227029e8cfc98a58405a935616a559d421e56c 100644 --- a/aom_dsp/mips/vpx_convolve_msa.h +++ b/aom_dsp/mips/aom_convolve_msa.h @@ -13,7 +13,7 @@ #define VPX_DSP_MIPS_VPX_CONVOLVE_MSA_H_ #include "aom_dsp/mips/macros_msa.h" -#include "aom_dsp/vpx_filter.h" +#include "aom_dsp/aom_filter.h" extern const uint8_t mc_filt_mask_arr[16 * 3]; diff --git a/aom_dsp/mips/avg_msa.c b/aom_dsp/mips/avg_msa.c index f556134504b685ed0eeda4b5fd6be3758b6b183b..0e172815532b7aae1e599c325e184ece751dc552 100644 --- a/aom_dsp/mips/avg_msa.c +++ b/aom_dsp/mips/avg_msa.c @@ -9,10 +9,10 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/macros_msa.h" -uint32_t vpx_avg_8x8_msa(const uint8_t *src, int32_t src_stride) { +uint32_t aom_avg_8x8_msa(const uint8_t *src, int32_t src_stride) { uint32_t sum_out; v16u8 src0, src1, src2, src3, src4, src5, src6, src7; v8u16 sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7; @@ -34,7 +34,7 @@ uint32_t vpx_avg_8x8_msa(const uint8_t *src, int32_t src_stride) { return sum_out; } -uint32_t vpx_avg_4x4_msa(const uint8_t *src, int32_t src_stride) { +uint32_t aom_avg_4x4_msa(const uint8_t *src, int32_t src_stride) { uint32_t sum_out; uint32_t src0, src1, src2, src3; v16u8 vec = { 0 }; diff --git a/aom_dsp/mips/common_dspr2.c b/aom_dsp/mips/common_dspr2.c index d749a99429ed64eee703616441aeb243979b90ef..00ab75dc31dba3ceeb665216d9e9edf026b6f5a8 100644 --- a/aom_dsp/mips/common_dspr2.c +++ b/aom_dsp/mips/common_dspr2.c @@ -12,20 +12,20 @@ #include "aom_dsp/mips/common_dspr2.h" #if HAVE_DSPR2 -uint8_t vpx_ff_cropTbl_a[256 + 2 * CROP_WIDTH]; -uint8_t *vpx_ff_cropTbl; +uint8_t aom_ff_cropTbl_a[256 + 2 * CROP_WIDTH]; +uint8_t *aom_ff_cropTbl; -void vpx_dsputil_static_init(void) { +void aom_dsputil_static_init(void) { int i; - for (i = 0; i < 256; i++) vpx_ff_cropTbl_a[i + CROP_WIDTH] = i; + for (i = 0; i < 256; i++) aom_ff_cropTbl_a[i + CROP_WIDTH] = i; for (i = 0; i < CROP_WIDTH; i++) { - vpx_ff_cropTbl_a[i] = 0; - vpx_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255; + aom_ff_cropTbl_a[i] = 0; + aom_ff_cropTbl_a[i + CROP_WIDTH + 256] = 255; } - vpx_ff_cropTbl = &vpx_ff_cropTbl_a[CROP_WIDTH]; + aom_ff_cropTbl = &aom_ff_cropTbl_a[CROP_WIDTH]; } #endif diff --git a/aom_dsp/mips/common_dspr2.h b/aom_dsp/mips/common_dspr2.h index 2d1bdf1421fc32723948d56b64d41f107555bef8..efb9afddae13f2f73e64e6436a18985be2f38715 100644 --- a/aom_dsp/mips/common_dspr2.h +++ b/aom_dsp/mips/common_dspr2.h @@ -13,8 +13,8 @@ #define VPX_COMMON_MIPS_DSPR2_H_ #include -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #ifdef __cplusplus extern "C" { @@ -22,7 +22,7 @@ extern "C" { #if HAVE_DSPR2 #define CROP_WIDTH 512 -extern uint8_t *vpx_ff_cropTbl; // From "aom_dsp/mips/intrapred4_dspr2.c" +extern uint8_t *aom_ff_cropTbl; // From "aom_dsp/mips/intrapred4_dspr2.c" static INLINE void prefetch_load(const unsigned char *src) { __asm__ __volatile__("pref 0, 0(%[src]) \n\t" : : [src] "r"(src)); diff --git a/aom_dsp/mips/convolve2_avg_dspr2.c b/aom_dsp/mips/convolve2_avg_dspr2.c index ce55d62793c571c191a9dcde967cac38cd3c0669..d557115b92fcf60297a4a474ac49fe0a365e9e4b 100644 --- a/aom_dsp/mips/convolve2_avg_dspr2.c +++ b/aom_dsp/mips/convolve2_avg_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -26,7 +26,7 @@ static void convolve_bi_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2; uint32_t p1, p2; @@ -125,7 +125,7 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2; 
uint32_t p1, p2; @@ -218,7 +218,7 @@ static void convolve_bi_avg_vert_64_dspr2(const uint8_t *src, } } -void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -248,7 +248,7 @@ void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c index b54244843d9b3e5fa5811a257a8b76fe1f7d13cd..efbdcf60fb34e17b100586186d5deefd07fc2267 100644 --- a/aom_dsp/mips/convolve2_avg_horiz_dspr2.c +++ b/aom_dsp/mips/convolve2_avg_horiz_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -24,7 +24,7 @@ static void convolve_bi_avg_horiz_4_dspr2(const uint8_t *src, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; int32_t Temp1, Temp2, Temp3, Temp4; uint32_t vector4a = 64; uint32_t tp1, tp2; @@ -115,7 +115,7 @@ static void convolve_bi_avg_horiz_8_dspr2(const uint8_t *src, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; int32_t Temp1, Temp2, Temp3; uint32_t tp1, tp2, tp3, tp4; @@ -262,7 +262,7 @@ static void convolve_bi_avg_horiz_16_dspr2(const uint8_t *src_ptr, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2, qload3; @@ -509,7 +509,7 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2, qload3; @@ -750,7 +750,7 @@ static void convolve_bi_avg_horiz_64_dspr2(const uint8_t *src_ptr, } } -void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, @@ -794,7 +794,7 @@ void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, h); break; default: - vpx_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/convolve2_dspr2.c b/aom_dsp/mips/convolve2_dspr2.c index 385eef2b52bf931051c5efc0d12e213d6f45cc9c..066308315d311a49b0451551689eb8b1a188e933 100644 --- a/aom_dsp/mips/convolve2_dspr2.c +++ b/aom_dsp/mips/convolve2_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include 
"aom_dsp/vpx_dsp_common.h" -#include "aom_dsp/vpx_filter.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/aom_filter.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -23,7 +23,7 @@ static void convolve_bi_horiz_4_transposed_dspr2( const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint8_t *dst_ptr; int32_t Temp1, Temp2; uint32_t vector4a = 64; @@ -107,7 +107,7 @@ static void convolve_bi_horiz_8_transposed_dspr2( const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint8_t *dst_ptr; uint32_t vector4a = 64; int32_t Temp1, Temp2, Temp3; @@ -243,7 +243,7 @@ static void convolve_bi_horiz_16_transposed_dspr2( int32_t c, y; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2; @@ -608,7 +608,7 @@ static void convolve_bi_horiz_64_transposed_dspr2( int32_t c, y; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2; @@ -988,7 +988,7 @@ void convolve_bi_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, } } -void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter, int w, int h) { uint32_t pos = 38; diff --git a/aom_dsp/mips/convolve2_horiz_dspr2.c b/aom_dsp/mips/convolve2_horiz_dspr2.c index 06899908cd851405a2785517ea2ef336b1c814f3..dc51ab1cb797416c8932b4f08d4125714b7e52bf 100644 --- a/aom_dsp/mips/convolve2_horiz_dspr2.c +++ b/aom_dsp/mips/convolve2_horiz_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -23,7 +23,7 @@ static void convolve_bi_horiz_4_dspr2(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; int32_t Temp1, Temp2, Temp3, Temp4; uint32_t vector4a = 64; uint32_t tp1, tp2; @@ -100,7 +100,7 @@ static void convolve_bi_horiz_8_dspr2(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; int32_t Temp1, Temp2, Temp3; uint32_t tp1, tp2, tp3; @@ -224,7 +224,7 @@ static void convolve_bi_horiz_16_dspr2(const uint8_t *src_ptr, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2, qload3; @@ -428,7 +428,7 @@ static void convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t Temp1, Temp2, Temp3; uint32_t qload1, qload2, qload3; @@ -627,7 +627,7 @@ static void 
convolve_bi_horiz_64_dspr2(const uint8_t *src_ptr, } } -void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -673,7 +673,7 @@ void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, (int32_t)dst_stride, filter_x, (int32_t)h); break; default: - vpx_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_horiz_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/convolve2_vert_dspr2.c b/aom_dsp/mips/convolve2_vert_dspr2.c index dc4d57ec6b1217f5d88bc822e89f95e1e808e805..3367be01a2d8167cfba327c03c6be612e9372130 100644 --- a/aom_dsp/mips/convolve2_vert_dspr2.c +++ b/aom_dsp/mips/convolve2_vert_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -26,7 +26,7 @@ static void convolve_bi_vert_4_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2; uint32_t p1, p2; @@ -116,7 +116,7 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2; uint32_t p1, p2; @@ -200,7 +200,7 @@ static void convolve_bi_vert_64_dspr2(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -229,7 +229,7 @@ void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, convolve_bi_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h); break; default: - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/convolve8_avg_dspr2.c b/aom_dsp/mips/convolve8_avg_dspr2.c index dc07844aa147a3e6868a39f894f6c1c8a752efc7..298065adb035700889bcbb39f2c0d4497a622a40 100644 --- a/aom_dsp/mips/convolve8_avg_dspr2.c +++ b/aom_dsp/mips/convolve8_avg_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -26,7 +26,7 @@ static void convolve_avg_vert_4_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2, load3, load4; uint32_t p1, p2; @@ -182,7 +182,7 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, 
y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2, load3, load4; uint32_t p1, p2; @@ -333,7 +333,7 @@ static void convolve_avg_vert_64_dspr2(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -342,7 +342,7 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(((const int32_t *)filter_y)[1] != 0x800000); if (((const int32_t *)filter_y)[0] == 0) { - vpx_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x, + aom_convolve2_avg_vert_dspr2(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; @@ -368,14 +368,14 @@ void vpx_convolve8_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, h); break; default: - vpx_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_avg_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } } } -void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -391,14 +391,14 @@ void vpx_convolve8_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, if (intermediate_height < h) intermediate_height = h; - vpx_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x, + aom_convolve8_horiz(src - (src_stride * 3), src_stride, temp, 64, filter_x, x_step_q4, filter_y, y_step_q4, w, intermediate_height); - vpx_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x, + aom_convolve8_avg_vert(temp + 64 * 3, 64, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } -void vpx_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve_avg_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, const int16_t *filter_y, int filter_y_stride, int w, diff --git a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c index da0db37ac21bfd973ae3323356f7727a9bdecec9..f6534b4205e20f6192681f81350bb5c94ad62565 100644 --- a/aom_dsp/mips/convolve8_avg_horiz_dspr2.c +++ b/aom_dsp/mips/convolve8_avg_horiz_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_convolve.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_convolve.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -23,7 +23,7 @@ static void convolve_avg_horiz_4_dspr2(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; int32_t vector1b, vector2b, vector3b, vector4b; int32_t Temp1, Temp2, Temp3, Temp4; uint32_t vector4a = 64; @@ -140,7 +140,7 @@ static void convolve_avg_horiz_8_dspr2(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = 
vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; int32_t vector1b, vector2b, vector3b, vector4b; int32_t Temp1, Temp2, Temp3; @@ -326,7 +326,7 @@ static void convolve_avg_horiz_16_dspr2(const uint8_t *src_ptr, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t filter12, filter34, filter56, filter78; int32_t Temp1, Temp2, Temp3; @@ -634,7 +634,7 @@ static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t filter12, filter34, filter56, filter78; int32_t Temp1, Temp2, Temp3; @@ -937,7 +937,7 @@ static void convolve_avg_horiz_64_dspr2(const uint8_t *src_ptr, } } -void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, @@ -946,7 +946,7 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(((const int32_t *)filter_x)[1] != 0x800000); if (((const int32_t *)filter_x)[0] == 0) { - vpx_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x, + aom_convolve2_avg_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; @@ -988,7 +988,7 @@ void vpx_convolve8_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, h); break; default: - vpx_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride, + aom_convolve8_avg_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; diff --git a/aom_dsp/mips/convolve8_dspr2.c b/aom_dsp/mips/convolve8_dspr2.c index fc4f759c96ca17161d3ef042eb35ccd7a0b0f76a..c871702f4b723f419b697ac5247a85a055db72ac 100644 --- a/aom_dsp/mips/convolve8_dspr2.c +++ b/aom_dsp/mips/convolve8_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_dsp_common.h" -#include "aom_dsp/vpx_filter.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/aom_filter.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -25,7 +25,7 @@ static void convolve_horiz_4_transposed_dspr2(const uint8_t *src, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint8_t *dst_ptr; int32_t vector1b, vector2b, vector3b, vector4b; int32_t Temp1, Temp2, Temp3, Temp4; @@ -139,7 +139,7 @@ static void convolve_horiz_8_transposed_dspr2(const uint8_t *src, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint8_t *dst_ptr; uint32_t vector4a = 64; int32_t vector1b, vector2b, vector3b, vector4b; @@ -312,7 +312,7 @@ static void convolve_horiz_16_transposed_dspr2( int32_t c, y; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t filter12, filter34, filter56, filter78; int32_t Temp1, Temp2, Temp3; @@ -790,7 +790,7 @@ static void convolve_horiz_64_transposed_dspr2( int32_t c, y; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t filter12, filter34, filter56, filter78; int32_t Temp1, Temp2, Temp3; @@ 
-1296,7 +1296,7 @@ void copy_horiz_transposed(const uint8_t *src, ptrdiff_t src_stride, } } -void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { @@ -1321,7 +1321,7 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, copy_horiz_transposed(src - src_stride * 3, src_stride, temp, intermediate_height, w, intermediate_height); } else if (((const int32_t *)filter_x)[0] == 0) { - vpx_convolve2_dspr2(src - src_stride * 3, src_stride, temp, + aom_convolve2_dspr2(src - src_stride * 3, src_stride, temp, intermediate_height, filter_x, w, intermediate_height); } else { src -= (src_stride * 3 + 3); @@ -1364,7 +1364,7 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, if (filter_y[3] == 0x80) { copy_horiz_transposed(temp + 3, intermediate_height, dst, dst_stride, h, w); } else if (((const int32_t *)filter_y)[0] == 0) { - vpx_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride, + aom_convolve2_dspr2(temp + 3, intermediate_height, dst, dst_stride, filter_y, h, w); } else { switch (h) { @@ -1393,7 +1393,7 @@ void vpx_convolve8_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, } } -void vpx_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve_copy_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int filter_x_stride, const int16_t *filter_y, int filter_y_stride, diff --git a/aom_dsp/mips/convolve8_horiz_dspr2.c b/aom_dsp/mips/convolve8_horiz_dspr2.c index 61291ca938cc855ddf1cbf5e286d703fe934a08a..c60557617922bb0104aad89fae6fec76a0c58b9c 100644 --- a/aom_dsp/mips/convolve8_horiz_dspr2.c +++ b/aom_dsp/mips/convolve8_horiz_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_dsp_common.h" -#include "aom_dsp/vpx_filter.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/aom_filter.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -23,7 +23,7 @@ static void convolve_horiz_4_dspr2(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; int32_t vector1b, vector2b, vector3b, vector4b; int32_t Temp1, Temp2, Temp3, Temp4; uint32_t vector4a = 64; @@ -129,7 +129,7 @@ static void convolve_horiz_8_dspr2(const uint8_t *src, int32_t src_stride, uint8_t *dst, int32_t dst_stride, const int16_t *filter_x0, int32_t h) { int32_t y; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; int32_t vector1b, vector2b, vector3b, vector4b; int32_t Temp1, Temp2, Temp3; @@ -291,7 +291,7 @@ static void convolve_horiz_16_dspr2(const uint8_t *src_ptr, int32_t src_stride, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t filter12, filter34, filter56, filter78; int32_t Temp1, Temp2, Temp3; @@ -556,7 +556,7 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, int32_t src_stride, int32_t y, c; const uint8_t *src; uint8_t *dst; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector_64 = 64; int32_t filter12, filter34, filter56, 
filter78; int32_t Temp1, Temp2, Temp3; @@ -817,7 +817,7 @@ static void convolve_horiz_64_dspr2(const uint8_t *src_ptr, int32_t src_stride, } } -void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -826,7 +826,7 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(((const int32_t *)filter_x)[1] != 0x800000); if (((const int32_t *)filter_x)[0] == 0) { - vpx_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x, + aom_convolve2_horiz_dspr2(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; @@ -869,7 +869,7 @@ void vpx_convolve8_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, (int32_t)dst_stride, filter_x, (int32_t)h); break; default: - vpx_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x, + aom_convolve8_horiz_c(src + 3, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/convolve8_vert_dspr2.c b/aom_dsp/mips/convolve8_vert_dspr2.c index b4c288608343d82fe43c52f594eb6c4a7416a3e4..d8a90b6abd409207753f0d02212b6ac89b239ac9 100644 --- a/aom_dsp/mips/convolve8_vert_dspr2.c +++ b/aom_dsp/mips/convolve8_vert_dspr2.c @@ -12,10 +12,10 @@ #include #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/convolve_common_dspr2.h" -#include "aom_dsp/vpx_dsp_common.h" -#include "aom_dsp/vpx_filter.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_dsp/aom_filter.h" #include "aom_ports/mem.h" #if HAVE_DSPR2 @@ -26,7 +26,7 @@ static void convolve_vert_4_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2, load3, load4; uint32_t p1, p2; @@ -174,7 +174,7 @@ static void convolve_vert_64_dspr2(const uint8_t *src, int32_t src_stride, int32_t x, y; const uint8_t *src_ptr; uint8_t *dst_ptr; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; uint32_t vector4a = 64; uint32_t load1, load2, load3, load4; uint32_t p1, p2; @@ -317,7 +317,7 @@ static void convolve_vert_64_dspr2(const uint8_t *src, int32_t src_stride, } } -void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, @@ -326,7 +326,7 @@ void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, assert(((const int32_t *)filter_y)[1] != 0x800000); if (((const int32_t *)filter_y)[0] == 0) { - vpx_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x, + aom_convolve2_vert_dspr2(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); } else { uint32_t pos = 38; @@ -350,7 +350,7 @@ void vpx_convolve8_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, convolve_vert_64_dspr2(src, src_stride, dst, dst_stride, filter_y, h); break; default: - vpx_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, + aom_convolve8_vert_c(src, src_stride, dst, dst_stride, filter_x, x_step_q4, filter_y, y_step_q4, w, h); break; } diff --git a/aom_dsp/mips/convolve_common_dspr2.h b/aom_dsp/mips/convolve_common_dspr2.h index 
d17393bc22d8d9fb0b278640f08b1465c41fb907..8f2c40bfde8e95343283b970799a2111ecc2267f 100644 --- a/aom_dsp/mips/convolve_common_dspr2.h +++ b/aom_dsp/mips/convolve_common_dspr2.h @@ -14,8 +14,8 @@ #include -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #include "aom_dsp/mips/common_dspr2.h" #ifdef __cplusplus @@ -23,29 +23,29 @@ extern "C" { #endif #if HAVE_DSPR2 -void vpx_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); -void vpx_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_avg_horiz_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); -void vpx_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_avg_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h); -void vpx_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_convolve2_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter, int w, int h); -void vpx_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, +void aom_convolve2_vert_dspr2(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, diff --git a/aom_dsp/mips/fwd_dct32x32_msa.c b/aom_dsp/mips/fwd_dct32x32_msa.c index 29f4f3ca847d6b4f9078ed9a7ccf55e5ee3a6f6d..2fae3a7c1dfae2bb19daebe958e2386c1abe4c3b 100644 --- a/aom_dsp/mips/fwd_dct32x32_msa.c +++ b/aom_dsp/mips/fwd_dct32x32_msa.c @@ -671,7 +671,7 @@ static void fdct32x8_1d_row_4x(int16_t *tmp_buf_big, int16_t *tmp_buf, fdct8x32_1d_row_transpose_store(tmp_buf, output); } -void vpx_fdct32x32_msa(const int16_t *input, int16_t *output, +void aom_fdct32x32_msa(const int16_t *input, int16_t *output, int32_t src_stride) { int32_t i; DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]); @@ -908,7 +908,7 @@ static void fdct32x8_1d_row_rd(int16_t *tmp_buf_big, int16_t *tmp_buf, fdct8x32_1d_row_transpose_store(tmp_buf, output); } -void vpx_fdct32x32_rd_msa(const int16_t *input, int16_t *out, +void aom_fdct32x32_rd_msa(const int16_t *input, int16_t *out, int32_t src_stride) { int32_t i; DECLARE_ALIGNED(32, int16_t, tmp_buf_big[1024]); @@ -927,7 +927,7 @@ void vpx_fdct32x32_rd_msa(const int16_t *input, int16_t *out, } } -void vpx_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) { +void aom_fdct32x32_1_msa(const int16_t *input, int16_t *out, int32_t stride) { out[1] = 0; out[0] = LD_HADD(input, stride); diff --git a/aom_dsp/mips/fwd_txfm_msa.c b/aom_dsp/mips/fwd_txfm_msa.c index c385cd75d7738a75e7e22709e33d9b5aceed7373..d9f2b1d959fd7015b3ccbb331ab971ae91145724 100644 --- a/aom_dsp/mips/fwd_txfm_msa.c +++ b/aom_dsp/mips/fwd_txfm_msa.c @@ -167,7 +167,7 @@ void fdct16x8_1d_row(int16_t *input, int16_t *output) { ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16); } -void vpx_fdct4x4_msa(const int16_t *input, int16_t *output, +void aom_fdct4x4_msa(const int16_t *input, int16_t *output, int32_t src_stride) { v8i16 in0, 
in1, in2, in3; @@ -197,7 +197,7 @@ void vpx_fdct4x4_msa(const int16_t *input, int16_t *output, ST_SH2(in0, in2, output, 8); } -void vpx_fdct8x8_msa(const int16_t *input, int16_t *output, +void aom_fdct8x8_msa(const int16_t *input, int16_t *output, int32_t src_stride) { v8i16 in0, in1, in2, in3, in4, in5, in6, in7; @@ -216,12 +216,12 @@ void vpx_fdct8x8_msa(const int16_t *input, int16_t *output, ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8); } -void vpx_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) { +void aom_fdct8x8_1_msa(const int16_t *input, int16_t *out, int32_t stride) { out[0] = LD_HADD(input, stride); out[1] = 0; } -void vpx_fdct16x16_msa(const int16_t *input, int16_t *output, +void aom_fdct16x16_msa(const int16_t *input, int16_t *output, int32_t src_stride) { int32_t i; DECLARE_ALIGNED(32, int16_t, tmp_buf[16 * 16]); @@ -237,7 +237,7 @@ void vpx_fdct16x16_msa(const int16_t *input, int16_t *output, } } -void vpx_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) { +void aom_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) { out[1] = 0; out[0] = LD_HADD(input, stride); diff --git a/aom_dsp/mips/idct16x16_msa.c b/aom_dsp/mips/idct16x16_msa.c index fb40535dc07b2c90a9a831ea2567ee923604ba13..03008739c65dacd9f0952aa45c1c10953550ed24 100644 --- a/aom_dsp/mips/idct16x16_msa.c +++ b/aom_dsp/mips/idct16x16_msa.c @@ -11,7 +11,7 @@ #include "aom_dsp/mips/inv_txfm_msa.h" -void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) { +void aom_idct16_1d_rows_msa(const int16_t *input, int16_t *output) { v8i16 loc0, loc1, loc2, loc3; v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14; v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15; @@ -104,7 +104,7 @@ void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output) { ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16); } -void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, +void aom_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, int32_t dst_stride) { v8i16 loc0, loc1, loc2, loc3; v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14; @@ -202,7 +202,7 @@ void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, VPX_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15); } -void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int32_t i; DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]); @@ -211,25 +211,25 @@ void vpx_idct16x16_256_add_msa(const int16_t *input, uint8_t *dst, /* transform rows */ for (i = 0; i < 2; ++i) { /* process 16 * 8 block */ - vpx_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7))); + aom_idct16_1d_rows_msa((input + (i << 7)), (out + (i << 7))); } /* transform columns */ for (i = 0; i < 2; ++i) { /* process 8 * 16 block */ - vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), + aom_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), dst_stride); } } -void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct16x16_10_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { uint8_t i; DECLARE_ALIGNED(32, int16_t, out_arr[16 * 16]); int16_t *out = out_arr; /* process 16 * 8 block */ - vpx_idct16_1d_rows_msa(input, out); + aom_idct16_1d_rows_msa(input, out); /* short case just considers top 4 rows as valid output */ out += 4 * 16; @@ -255,12 +255,12 @@ void vpx_idct16x16_10_add_msa(const int16_t *input, uint8_t 
*dst, /* transform columns */ for (i = 0; i < 2; ++i) { /* process 8 * 16 block */ - vpx_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), + aom_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)), dst_stride); } } -void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { uint8_t i; int16_t out; @@ -290,7 +290,7 @@ void vpx_idct16x16_1_add_msa(const int16_t *input, uint8_t *dst, } } -void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) { +void aom_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) { v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15; @@ -320,7 +320,7 @@ void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output) { ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16); } -void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, +void aom_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, int32_t dst_stride) { v8i16 v0, v2, v4, v6, k0, k1, k2, k3; v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15; diff --git a/aom_dsp/mips/idct32x32_msa.c b/aom_dsp/mips/idct32x32_msa.c index 239edc30dbbf1eb9f74ce439b09998a9b3703625..61a54a2c4cf3cd44289f9a20be3d25f3d858b30a 100644 --- a/aom_dsp/mips/idct32x32_msa.c +++ b/aom_dsp/mips/idct32x32_msa.c @@ -630,7 +630,7 @@ static void idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, dst_stride); } -void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int32_t i; DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]); @@ -650,7 +650,7 @@ void vpx_idct32x32_1024_add_msa(const int16_t *input, uint8_t *dst, } } -void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int32_t i; DECLARE_ALIGNED(32, int16_t, out_arr[32 * 32]); @@ -694,7 +694,7 @@ void vpx_idct32x32_34_add_msa(const int16_t *input, uint8_t *dst, } } -void vpx_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct32x32_1_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int32_t i; int16_t out; diff --git a/aom_dsp/mips/idct4x4_msa.c b/aom_dsp/mips/idct4x4_msa.c index 344be0bbb79718bae919ffb82b38a96c97bc5321..f4d8dfa7817029622c3a72bea2a21335cd486d69 100644 --- a/aom_dsp/mips/idct4x4_msa.c +++ b/aom_dsp/mips/idct4x4_msa.c @@ -11,7 +11,7 @@ #include "aom_dsp/mips/inv_txfm_msa.h" -void vpx_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst, +void aom_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { v8i16 in0, in1, in2, in3; v4i32 in0_r, in1_r, in2_r, in3_r, in4_r; @@ -48,7 +48,7 @@ void vpx_iwht4x4_16_add_msa(const int16_t *input, uint8_t *dst, ADDBLK_ST4x4_UB(in0, in3, in1, in2, dst, dst_stride); } -void vpx_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst, +void aom_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int16_t a1, e1; v8i16 in1, in0 = { 0 }; @@ -68,7 +68,7 @@ void vpx_iwht4x4_1_add_msa(const int16_t *input, uint8_t *dst, ADDBLK_ST4x4_UB(in0, in1, in1, in1, dst, dst_stride); } -void vpx_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { v8i16 in0, in1, in2, in3; @@ -85,7 +85,7 @@ void 
vpx_idct4x4_16_add_msa(const int16_t *input, uint8_t *dst, ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride); } -void vpx_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct4x4_1_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int16_t out; v8i16 vec; diff --git a/aom_dsp/mips/idct8x8_msa.c b/aom_dsp/mips/idct8x8_msa.c index f2ff15d66a5ea6bf4df47e80be14cb90b8ad8474..a9e933208be0c13296f909f7f1a512723b1e8c40 100644 --- a/aom_dsp/mips/idct8x8_msa.c +++ b/aom_dsp/mips/idct8x8_msa.c @@ -11,7 +11,7 @@ #include "aom_dsp/mips/inv_txfm_msa.h" -void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { v8i16 in0, in1, in2, in3, in4, in5, in6, in7; @@ -39,7 +39,7 @@ void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst, VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); } -void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { v8i16 in0, in1, in2, in3, in4, in5, in6, in7; v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3; @@ -100,7 +100,7 @@ void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst, VPX_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7); } -void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst, +void aom_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst, int32_t dst_stride) { int16_t out; int32_t val; diff --git a/aom_dsp/mips/intrapred16_dspr2.c b/aom_dsp/mips/intrapred16_dspr2.c index 2784460bdaa1523d30f4022dace0962d87249a85..dc8f20208a610d1d5ed8040219754a4d8254192d 100644 --- a/aom_dsp/mips/intrapred16_dspr2.c +++ b/aom_dsp/mips/intrapred16_dspr2.c @@ -12,7 +12,7 @@ #include "aom_dsp/mips/common_dspr2.h" #if HAVE_DSPR2 -void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; int32_t tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, tmp16; @@ -156,7 +156,7 @@ void vpx_h_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride)); } -void vpx_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_16x16_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t expected_dc; int32_t average; diff --git a/aom_dsp/mips/intrapred4_dspr2.c b/aom_dsp/mips/intrapred4_dspr2.c index e142277bc9d508a17b0b64c33ec807ea51e07462..ea7c02810a6ce75bba425a3ba9b09149f96798c5 100644 --- a/aom_dsp/mips/intrapred4_dspr2.c +++ b/aom_dsp/mips/intrapred4_dspr2.c @@ -12,7 +12,7 @@ #include "aom_dsp/mips/common_dspr2.h" #if HAVE_DSPR2 -void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t tmp1, tmp2, tmp3, tmp4; @@ -38,7 +38,7 @@ void vpx_h_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride)); } -void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t expected_dc; int32_t average; @@ -79,7 +79,7 @@ void vpx_dc_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, [stride] "r"(stride)); } -void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, +void 
aom_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t abovel, abover; int32_t left0, left1, left2, left3; @@ -87,7 +87,7 @@ void vpx_tm_predictor_4x4_dspr2(uint8_t *dst, ptrdiff_t stride, int32_t resl; int32_t resr; int32_t top_left; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; __asm__ __volatile__( "ulw %[resl], (%[above]) \n\t" diff --git a/aom_dsp/mips/intrapred8_dspr2.c b/aom_dsp/mips/intrapred8_dspr2.c index 0bf4bd317e4438d62e01df9404c0ed28c77e35a7..1114fbc0065611cc188a395ab6e2d3f806993a15 100644 --- a/aom_dsp/mips/intrapred8_dspr2.c +++ b/aom_dsp/mips/intrapred8_dspr2.c @@ -12,7 +12,7 @@ #include "aom_dsp/mips/common_dspr2.h" #if HAVE_DSPR2 -void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; @@ -65,7 +65,7 @@ void vpx_h_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, : [left] "r"(left), [dst] "r"(dst), [stride] "r"(stride)); } -void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t expected_dc; int32_t average; @@ -147,7 +147,7 @@ void vpx_dc_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, [stride] "r"(stride)); } -void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, +void aom_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, const uint8_t *above, const uint8_t *left) { int32_t abovel, abover; int32_t abovel_1, abover_1; @@ -155,7 +155,7 @@ void vpx_tm_predictor_8x8_dspr2(uint8_t *dst, ptrdiff_t stride, int32_t res0, res1, res2, res3; int32_t reshw; int32_t top_left; - uint8_t *cm = vpx_ff_cropTbl; + uint8_t *cm = aom_ff_cropTbl; __asm__ __volatile__( "ulw %[reshw], (%[above]) \n\t" diff --git a/aom_dsp/mips/intrapred_msa.c b/aom_dsp/mips/intrapred_msa.c index 5ab9205077342fc3e05e801fd47588b53ecbeefe..e8eaec7a940e2f9bfa298be3f8ab17fa07e1fa27 100644 --- a/aom_dsp/mips/intrapred_msa.c +++ b/aom_dsp/mips/intrapred_msa.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/macros_msa.h" #define IPRED_SUBS_UH2_UH(in0, in1, out0, out1) \ @@ -552,125 +552,125 @@ static void intra_predict_tm_32x32_msa(const uint8_t *src_top, } } -void vpx_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_v_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_vert_4x4_msa(above, dst, y_stride); } -void vpx_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_v_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_vert_8x8_msa(above, dst, y_stride); } -void vpx_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_v_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_vert_16x16_msa(above, dst, y_stride); } -void vpx_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_v_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_vert_32x32_msa(above, dst, y_stride); } -void vpx_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_h_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; intra_predict_horiz_4x4_msa(left, dst, y_stride); } -void vpx_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_h_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; intra_predict_horiz_8x8_msa(left, dst, y_stride); } -void vpx_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_h_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; intra_predict_horiz_16x16_msa(left, dst, y_stride); } -void vpx_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_h_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; intra_predict_horiz_32x32_msa(left, dst, y_stride); } -void vpx_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_dc_4x4_msa(above, left, dst, y_stride); } -void vpx_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_dc_8x8_msa(above, left, dst, y_stride); } -void vpx_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_dc_16x16_msa(above, left, dst, y_stride); } -void vpx_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_dc_32x32_msa(above, left, dst, y_stride); } -void vpx_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_top_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_dc_tl_4x4_msa(above, dst, y_stride); } -void vpx_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_top_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; 
intra_predict_dc_tl_8x8_msa(above, dst, y_stride); } -void vpx_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_top_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_dc_tl_16x16_msa(above, dst, y_stride); } -void vpx_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_top_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)left; intra_predict_dc_tl_32x32_msa(above, dst, y_stride); } -void vpx_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_left_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; intra_predict_dc_tl_4x4_msa(left, dst, y_stride); } -void vpx_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_left_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; intra_predict_dc_tl_8x8_msa(left, dst, y_stride); } -void vpx_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; @@ -678,7 +678,7 @@ void vpx_dc_left_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, intra_predict_dc_tl_16x16_msa(left, dst, y_stride); } -void vpx_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; @@ -686,7 +686,7 @@ void vpx_dc_left_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, intra_predict_dc_tl_32x32_msa(left, dst, y_stride); } -void vpx_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; (void)left; @@ -694,7 +694,7 @@ void vpx_dc_128_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, intra_predict_128dc_4x4_msa(dst, y_stride); } -void vpx_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; (void)left; @@ -702,7 +702,7 @@ void vpx_dc_128_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, intra_predict_128dc_8x8_msa(dst, y_stride); } -void vpx_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; (void)left; @@ -710,7 +710,7 @@ void vpx_dc_128_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, intra_predict_128dc_16x16_msa(dst, y_stride); } -void vpx_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { (void)above; (void)left; @@ -718,22 +718,22 @@ void vpx_dc_128_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, intra_predict_128dc_32x32_msa(dst, y_stride); } -void vpx_tm_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_tm_predictor_4x4_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_tm_4x4_msa(above, left, dst, y_stride); } -void vpx_tm_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_tm_predictor_8x8_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { 
intra_predict_tm_8x8_msa(above, left, dst, y_stride); } -void vpx_tm_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_tm_predictor_16x16_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_tm_16x16_msa(above, left, dst, y_stride); } -void vpx_tm_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, +void aom_tm_predictor_32x32_msa(uint8_t *dst, ptrdiff_t y_stride, const uint8_t *above, const uint8_t *left) { intra_predict_tm_32x32_msa(above, left, dst, y_stride); } diff --git a/aom_dsp/mips/inv_txfm_dspr2.h b/aom_dsp/mips/inv_txfm_dspr2.h index 17664c276a08d33c2bb2b8608002473a4ee64431..e7425744f81d3840e3363141ffb0db1db9b2fea7 100644 --- a/aom_dsp/mips/inv_txfm_dspr2.h +++ b/aom_dsp/mips/inv_txfm_dspr2.h @@ -14,8 +14,8 @@ #include -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #include "aom_dsp/inv_txfm.h" #include "aom_dsp/mips/common_dspr2.h" @@ -59,10 +59,10 @@ extern "C" { out; \ }) -void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, +void aom_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride); -void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output); -void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, +void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output); +void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride); void iadst4_dspr2(const int16_t *input, int16_t *output); void idct8_rows_dspr2(const int16_t *input, int16_t *output, uint32_t no_rows); diff --git a/aom_dsp/mips/inv_txfm_msa.h b/aom_dsp/mips/inv_txfm_msa.h index 3e1f9b6d886e8f91c176c753a7f3097a99922619..c8950ba451d9d435d17dfbd4be007f4935d28771 100644 --- a/aom_dsp/mips/inv_txfm_msa.h +++ b/aom_dsp/mips/inv_txfm_msa.h @@ -403,10 +403,10 @@ MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \ } -void vpx_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, +void aom_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, int32_t dst_stride); -void vpx_idct16_1d_rows_msa(const int16_t *input, int16_t *output); -void vpx_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, +void aom_idct16_1d_rows_msa(const int16_t *input, int16_t *output); +void aom_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst, int32_t dst_stride); -void vpx_iadst16_1d_rows_msa(const int16_t *input, int16_t *output); +void aom_iadst16_1d_rows_msa(const int16_t *input, int16_t *output); #endif // VPX_DSP_MIPS_INV_TXFM_MSA_H_ diff --git a/aom_dsp/mips/itrans16_dspr2.c b/aom_dsp/mips/itrans16_dspr2.c index 2774dc18149c30c1c7a3f96318db7b9c2cedb9b8..c63b1e8570642d1f68ca9e9ab0299f282db2a7f6 100644 --- a/aom_dsp/mips/itrans16_dspr2.c +++ b/aom_dsp/mips/itrans16_dspr2.c @@ -9,8 +9,8 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/inv_txfm_dspr2.h" #include "aom_dsp/txfm_common.h" @@ -402,17 +402,17 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { int result1, result2, result3, result4; const int const_2_power_13 = 8192; uint8_t *dest_pix; - uint8_t *cm = vpx_ff_cropTbl; - - /* prefetch vpx_ff_cropTbl */ - prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); - prefetch_load(vpx_ff_cropTbl + 128); - prefetch_load(vpx_ff_cropTbl + 160); - prefetch_load(vpx_ff_cropTbl + 192); - prefetch_load(vpx_ff_cropTbl + 224); + uint8_t *cm = aom_ff_cropTbl; + + /* prefetch aom_ff_cropTbl */ + prefetch_load(aom_ff_cropTbl); + prefetch_load(aom_ff_cropTbl + 32); + prefetch_load(aom_ff_cropTbl + 64); + prefetch_load(aom_ff_cropTbl + 96); + prefetch_load(aom_ff_cropTbl + 128); + prefetch_load(aom_ff_cropTbl + 160); + prefetch_load(aom_ff_cropTbl + 192); + prefetch_load(aom_ff_cropTbl + 224); for (i = 0; i < 16; ++i) { dest_pix = (dest + i); @@ -869,7 +869,7 @@ void idct16_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { } } -void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { DECLARE_ALIGNED(32, int16_t, out[16 * 16]); uint32_t pos = 45; @@ -884,7 +884,7 @@ void vpx_idct16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, idct16_cols_add_blk_dspr2(out, dest, dest_stride); } -void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { DECLARE_ALIGNED(32, int16_t, out[16 * 16]); int16_t *outptr = out; @@ -928,7 +928,7 @@ void vpx_idct16x16_10_add_dspr2(const int16_t *input, uint8_t *dest, idct16_cols_add_blk_dspr2(out, dest, dest_stride); } -void vpx_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct16x16_1_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { uint32_t pos = 45; int32_t out; diff --git a/aom_dsp/mips/itrans32_cols_dspr2.c b/aom_dsp/mips/itrans32_cols_dspr2.c index 6373b08aaa09ee1a87f173054369321f111a4797..d469d1ad0b1401088c677ba6cd5154a0871c9168 100644 --- a/aom_dsp/mips/itrans32_cols_dspr2.c +++ b/aom_dsp/mips/itrans32_cols_dspr2.c @@ -9,12 +9,12 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/mips/inv_txfm_dspr2.h" #include "aom_dsp/txfm_common.h" #if HAVE_DSPR2 -void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, +void aom_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { int16_t step1_0, step1_1, step1_2, step1_3, step1_4, step1_5, step1_6; int16_t step1_7, step1_8, step1_9, step1_10, step1_11, step1_12, step1_13; @@ -36,17 +36,17 @@ void vpx_idct32_cols_add_blk_dspr2(int16_t *input, uint8_t *dest, int i, temp21; uint8_t *dest_pix, *dest_pix1; const int const_2_power_13 = 8192; - uint8_t *cm = vpx_ff_cropTbl; - - /* prefetch vpx_ff_cropTbl */ - prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); - prefetch_load(vpx_ff_cropTbl + 128); - prefetch_load(vpx_ff_cropTbl + 160); - prefetch_load(vpx_ff_cropTbl + 192); - prefetch_load(vpx_ff_cropTbl + 224); + uint8_t *cm = aom_ff_cropTbl; + + /* prefetch aom_ff_cropTbl */ + prefetch_load(aom_ff_cropTbl); + prefetch_load(aom_ff_cropTbl + 32); + prefetch_load(aom_ff_cropTbl + 64); + prefetch_load(aom_ff_cropTbl + 96); + prefetch_load(aom_ff_cropTbl + 128); + prefetch_load(aom_ff_cropTbl + 160); + prefetch_load(aom_ff_cropTbl + 192); + prefetch_load(aom_ff_cropTbl + 224); for (i = 0; i < 32; ++i) { dest_pix = dest + i; diff --git a/aom_dsp/mips/itrans32_dspr2.c b/aom_dsp/mips/itrans32_dspr2.c index 3c04dead83015a87b82e423c6765461f1b03ae9c..fa77032176df1ac1d22d01a370feae0fd7500ddf 100644 --- a/aom_dsp/mips/itrans32_dspr2.c +++ b/aom_dsp/mips/itrans32_dspr2.c @@ -12,7 +12,7 @@ #include #include -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_dsp/mips/inv_txfm_dspr2.h" #include "aom_dsp/txfm_common.h" @@ -836,7 +836,7 @@ static void idct32_rows_dspr2(const int16_t *input, int16_t *output, } } -void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { DECLARE_ALIGNED(32, int16_t, out[32 * 32]); int16_t *outptr = out; @@ -851,10 +851,10 @@ void vpx_idct32x32_1024_add_dspr2(const int16_t *input, uint8_t *dest, idct32_rows_dspr2(input, outptr, 32); // Columns - vpx_idct32_cols_add_blk_dspr2(out, dest, dest_stride); + aom_idct32_cols_add_blk_dspr2(out, dest, dest_stride); } -void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, int stride) { DECLARE_ALIGNED(32, int16_t, out[32 * 32]); int16_t *outptr = out; @@ -909,10 +909,10 @@ void vpx_idct32x32_34_add_dspr2(const int16_t *input, uint8_t *dest, } // Columns - vpx_idct32_cols_add_blk_dspr2(out, dest, stride); + aom_idct32_cols_add_blk_dspr2(out, dest, stride); } -void vpx_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct32x32_1_add_dspr2(const int16_t *input, uint8_t *dest, int stride) { int r, out; int32_t a1, absa1; diff --git a/aom_dsp/mips/itrans4_dspr2.c b/aom_dsp/mips/itrans4_dspr2.c index ea5996450d9f8a96843b15e09a63a878f63288ba..e6d0367cd2f024309c5513bd20edbd33073cc21b 100644 --- a/aom_dsp/mips/itrans4_dspr2.c +++ b/aom_dsp/mips/itrans4_dspr2.c @@ -9,13 +9,13 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/inv_txfm_dspr2.h" #include "aom_dsp/txfm_common.h" #if HAVE_DSPR2 -void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) { +void aom_idct4_rows_dspr2(const int16_t *input, int16_t *output) { int16_t step_0, step_1, step_2, step_3; int Temp0, Temp1, Temp2, Temp3; const int const_2_power_13 = 8192; @@ -96,24 +96,24 @@ void vpx_idct4_rows_dspr2(const int16_t *input, int16_t *output) { } } -void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, +void aom_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, int dest_stride) { int16_t step_0, step_1, step_2, step_3; int Temp0, Temp1, Temp2, Temp3; const int const_2_power_13 = 8192; int i; uint8_t *dest_pix; - uint8_t *cm = vpx_ff_cropTbl; - - /* prefetch vpx_ff_cropTbl */ - prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); - prefetch_load(vpx_ff_cropTbl + 128); - prefetch_load(vpx_ff_cropTbl + 160); - prefetch_load(vpx_ff_cropTbl + 192); - prefetch_load(vpx_ff_cropTbl + 224); + uint8_t *cm = aom_ff_cropTbl; + + /* prefetch aom_ff_cropTbl */ + prefetch_load(aom_ff_cropTbl); + prefetch_load(aom_ff_cropTbl + 32); + prefetch_load(aom_ff_cropTbl + 64); + prefetch_load(aom_ff_cropTbl + 96); + prefetch_load(aom_ff_cropTbl + 128); + prefetch_load(aom_ff_cropTbl + 160); + prefetch_load(aom_ff_cropTbl + 192); + prefetch_load(aom_ff_cropTbl + 224); for (i = 0; i < 4; ++i) { dest_pix = (dest + i); @@ -216,7 +216,7 @@ void vpx_idct4_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, } } -void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { DECLARE_ALIGNED(32, int16_t, out[4 * 4]); int16_t *outptr = out; @@ -228,13 +228,13 @@ void vpx_idct4x4_16_add_dspr2(const int16_t *input, uint8_t *dest, : [pos] "r"(pos)); // Rows - vpx_idct4_rows_dspr2(input, outptr); + aom_idct4_rows_dspr2(input, outptr); // Columns - vpx_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride); + aom_idct4_columns_add_blk_dspr2(&out[0], dest, dest_stride); } -void vpx_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct4x4_1_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { int a1, absa1; int r; diff --git a/aom_dsp/mips/itrans8_dspr2.c b/aom_dsp/mips/itrans8_dspr2.c index 3781dbe3ecf387905f4c0f2e16206fb5530c11f8..0a20f76f2d97d5fc346b16e0c43715ab16426349 100644 --- a/aom_dsp/mips/itrans8_dspr2.c +++ b/aom_dsp/mips/itrans8_dspr2.c @@ -9,8 +9,8 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/inv_txfm_dspr2.h" #include "aom_dsp/txfm_common.h" @@ -200,17 +200,17 @@ void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, int i; const int const_2_power_13 = 8192; uint8_t *dest_pix; - uint8_t *cm = vpx_ff_cropTbl; - - /* prefetch vpx_ff_cropTbl */ - prefetch_load(vpx_ff_cropTbl); - prefetch_load(vpx_ff_cropTbl + 32); - prefetch_load(vpx_ff_cropTbl + 64); - prefetch_load(vpx_ff_cropTbl + 96); - prefetch_load(vpx_ff_cropTbl + 128); - prefetch_load(vpx_ff_cropTbl + 160); - prefetch_load(vpx_ff_cropTbl + 192); - prefetch_load(vpx_ff_cropTbl + 224); + uint8_t *cm = aom_ff_cropTbl; + + /* prefetch aom_ff_cropTbl */ + prefetch_load(aom_ff_cropTbl); + prefetch_load(aom_ff_cropTbl + 32); + prefetch_load(aom_ff_cropTbl + 64); + prefetch_load(aom_ff_cropTbl + 96); + prefetch_load(aom_ff_cropTbl + 128); + prefetch_load(aom_ff_cropTbl + 160); + prefetch_load(aom_ff_cropTbl + 192); + prefetch_load(aom_ff_cropTbl + 224); for (i = 0; i < 8; ++i) { dest_pix = (dest + i); @@ -439,7 +439,7 @@ void idct8_columns_add_blk_dspr2(int16_t *input, uint8_t *dest, } } -void vpx_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { DECLARE_ALIGNED(32, int16_t, out[8 * 8]); int16_t *outptr = out; @@ -455,7 +455,7 @@ void vpx_idct8x8_64_add_dspr2(const int16_t *input, uint8_t *dest, idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); } -void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { DECLARE_ALIGNED(32, int16_t, out[8 * 8]); int16_t *outptr = out; @@ -494,7 +494,7 @@ void vpx_idct8x8_12_add_dspr2(const int16_t *input, uint8_t *dest, idct8_columns_add_blk_dspr2(&out[0], dest, dest_stride); } -void vpx_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest, +void aom_idct8x8_1_add_dspr2(const int16_t *input, uint8_t *dest, int dest_stride) { uint32_t pos = 45; int32_t out; diff --git a/aom_dsp/mips/loopfilter_16_msa.c b/aom_dsp/mips/loopfilter_16_msa.c index c408af1e980d1387d6cc00522a9684794e7ce84c..b3c3fd3aec81d71b9b846c9893c891724923a177 100644 --- a/aom_dsp/mips/loopfilter_16_msa.c +++ b/aom_dsp/mips/loopfilter_16_msa.c @@ -12,7 +12,7 @@ #include "aom_ports/mem.h" #include "aom_dsp/mips/loopfilter_msa.h" -int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48, +int32_t aom_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr) { @@ -78,7 +78,7 @@ int32_t vpx_hz_lpf_t4_and_t8_16w(uint8_t *src, int32_t pitch, uint8_t *filter48, } } -void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) { +void aom_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) { v16u8 flat, flat2, filter8; v16i8 zero = { 0 }; v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7; @@ -404,7 +404,7 @@ void vpx_hz_lpf_t16_16w(uint8_t *src, int32_t pitch, uint8_t *filter48) { } } -void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch, +void aom_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr, int32_t count) { @@ -413,15 +413,15 @@ void vpx_lpf_horizontal_16_dual_msa(uint8_t *src, int32_t pitch, (void)count; - early_exit = 
vpx_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr, + early_exit = aom_hz_lpf_t4_and_t8_16w(src, pitch, &filter48[0], b_limit_ptr, limit_ptr, thresh_ptr); if (0 == early_exit) { - vpx_hz_lpf_t16_16w(src, pitch, filter48); + aom_hz_lpf_t16_16w(src, pitch, filter48); } } -void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, +void aom_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr, int32_t count) { @@ -639,7 +639,7 @@ void vpx_lpf_horizontal_16_msa(uint8_t *src, int32_t pitch, } } } else { - vpx_lpf_horizontal_16_dual_msa(src, pitch, b_limit_ptr, limit_ptr, + aom_lpf_horizontal_16_dual_msa(src, pitch, b_limit_ptr, limit_ptr, thresh_ptr, count); } } @@ -739,7 +739,7 @@ static void transpose_16x16(uint8_t *input, int32_t in_pitch, uint8_t *output, ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch); } -int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48, +int32_t aom_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48, uint8_t *src_org, int32_t pitch_org, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, @@ -806,7 +806,7 @@ int32_t vpx_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48, } } -int32_t vpx_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch, +int32_t aom_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch, uint8_t *filter48) { v16i8 zero = { 0 }; v16u8 filter8, flat, flat2; @@ -1026,7 +1026,7 @@ int32_t vpx_vt_lpf_t16_8w(uint8_t *src, uint8_t *src_org, int32_t pitch, } } -void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch, +void aom_lpf_vertical_16_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr) { @@ -1037,11 +1037,11 @@ void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch, transpose_16x8_to_8x16(src - 8, pitch, transposed_input, 16); early_exit = - vpx_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src, + aom_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8), &filter48[0], src, pitch, b_limit_ptr, limit_ptr, thresh_ptr); if (0 == early_exit) { - early_exit = vpx_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch, + early_exit = aom_vt_lpf_t16_8w((transposed_input + 16 * 8), src, pitch, &filter48[0]); if (0 == early_exit) { @@ -1050,7 +1050,7 @@ void vpx_lpf_vertical_16_msa(uint8_t *src, int32_t pitch, } } -int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48, +int32_t aom_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48, uint8_t *src_org, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, @@ -1127,7 +1127,7 @@ int32_t vpx_vt_lpf_t4_and_t8_16w(uint8_t *src, uint8_t *filter48, } } -int32_t vpx_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch, +int32_t aom_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch, uint8_t *filter48) { v16u8 flat, flat2, filter8; v16i8 zero = { 0 }; @@ -1448,7 +1448,7 @@ int32_t vpx_vt_lpf_t16_16w(uint8_t *src, uint8_t *src_org, int32_t pitch, } } -void vpx_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch, +void aom_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr) { @@ -1459,11 +1459,11 @@ void vpx_lpf_vertical_16_dual_msa(uint8_t *src, int32_t pitch, transpose_16x16((src - 8), pitch, &transposed_input[0], 16); early_exit = - vpx_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src, + aom_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8), &filter48[0], src, pitch, 
b_limit_ptr, limit_ptr, thresh_ptr); if (0 == early_exit) { - early_exit = vpx_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch, + early_exit = aom_vt_lpf_t16_16w((transposed_input + 16 * 8), src, pitch, &filter48[0]); if (0 == early_exit) { diff --git a/aom_dsp/mips/loopfilter_4_msa.c b/aom_dsp/mips/loopfilter_4_msa.c index 785798ecfa19feccefe7d29c8f93abeb59bb549d..c1877d34b62c933e59890111f758cff65d734a00 100644 --- a/aom_dsp/mips/loopfilter_4_msa.c +++ b/aom_dsp/mips/loopfilter_4_msa.c @@ -11,7 +11,7 @@ #include "aom_dsp/mips/loopfilter_msa.h" -void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch, +void aom_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr, int32_t count) { @@ -39,7 +39,7 @@ void vpx_lpf_horizontal_4_msa(uint8_t *src, int32_t pitch, SD4(p1_d, p0_d, q0_d, q1_d, (src - 2 * pitch), pitch); } -void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, +void aom_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit0_ptr, const uint8_t *limit0_ptr, const uint8_t *thresh0_ptr, @@ -71,7 +71,7 @@ void vpx_lpf_horizontal_4_dual_msa(uint8_t *src, int32_t pitch, ST_UB4(p1, p0, q0, q1, (src - 2 * pitch), pitch); } -void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch, +void aom_lpf_vertical_4_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr, int32_t count) { @@ -101,7 +101,7 @@ void vpx_lpf_vertical_4_msa(uint8_t *src, int32_t pitch, ST4x4_UB(vec3, vec3, 0, 1, 2, 3, src, pitch); } -void vpx_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch, +void aom_lpf_vertical_4_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit0_ptr, const uint8_t *limit0_ptr, const uint8_t *thresh0_ptr, diff --git a/aom_dsp/mips/loopfilter_8_msa.c b/aom_dsp/mips/loopfilter_8_msa.c index adf797fbbc6da8388bac7ee4ea178b3a7a1bd635..1d4c1feb1f39f11d39118fd16e0d9b81fbba4227 100644 --- a/aom_dsp/mips/loopfilter_8_msa.c +++ b/aom_dsp/mips/loopfilter_8_msa.c @@ -11,7 +11,7 @@ #include "aom_dsp/mips/loopfilter_msa.h" -void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, +void aom_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr, int32_t count) { @@ -81,7 +81,7 @@ void vpx_lpf_horizontal_8_msa(uint8_t *src, int32_t pitch, } } -void vpx_lpf_horizontal_8_dual_msa( +void aom_lpf_horizontal_8_dual_msa( uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1, const uint8_t *thresh1) { @@ -152,7 +152,7 @@ void vpx_lpf_horizontal_8_dual_msa( } } -void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, +void aom_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit_ptr, const uint8_t *limit_ptr, const uint8_t *thresh_ptr, int32_t count) { @@ -229,7 +229,7 @@ void vpx_lpf_vertical_8_msa(uint8_t *src, int32_t pitch, } } -void vpx_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch, +void aom_lpf_vertical_8_dual_msa(uint8_t *src, int32_t pitch, const uint8_t *b_limit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *b_limit1, const uint8_t *limit1, diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.c b/aom_dsp/mips/loopfilter_filters_dspr2.c index a238d097262927d680e2c834c922243af484a076..08c2ebe41e8d9ca6c7785f430a000f670cb967d6 100644 --- a/aom_dsp/mips/loopfilter_filters_dspr2.c +++ b/aom_dsp/mips/loopfilter_filters_dspr2.c @@ 
-11,16 +11,16 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" #include "aom_dsp/mips/common_dspr2.h" #include "aom_dsp/mips/loopfilter_filters_dspr2.h" #include "aom_dsp/mips/loopfilter_macros_dspr2.h" #include "aom_dsp/mips/loopfilter_masks_dspr2.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #if HAVE_DSPR2 -void vpx_lpf_horizontal_4_dspr2(unsigned char *s, int pitch, +void aom_lpf_horizontal_4_dspr2(unsigned char *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { uint8_t i; @@ -105,7 +105,7 @@ void vpx_lpf_horizontal_4_dspr2(unsigned char *s, int pitch, } } -void vpx_lpf_vertical_4_dspr2(unsigned char *s, int pitch, +void aom_lpf_vertical_4_dspr2(unsigned char *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { uint8_t i; @@ -282,46 +282,46 @@ void vpx_lpf_vertical_4_dspr2(unsigned char *s, int pitch, } } -void vpx_lpf_horizontal_4_dual_dspr2( +void aom_lpf_horizontal_4_dual_dspr2( uint8_t *s, int p /* pitch */, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); + aom_lpf_horizontal_4_dspr2(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_horizontal_4_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_horizontal_8_dual_dspr2( +void aom_lpf_horizontal_8_dual_dspr2( uint8_t *s, int p /* pitch */, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); + aom_lpf_horizontal_8_dspr2(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_horizontal_8_dspr2(s + 8, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_vertical_4_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1); + aom_lpf_vertical_4_dspr2(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_vertical_4_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_vertical_8_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { - vpx_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0, 1); - vpx_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1); + aom_lpf_vertical_8_dspr2(s, p, blimit0, limit0, thresh0, 1); + aom_lpf_vertical_8_dspr2(s + 8 * p, p, blimit1, limit1, thresh1, 1); } -void vpx_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit, +void aom_lpf_vertical_16_dual_dspr2(uint8_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { - vpx_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh); - vpx_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh); + aom_lpf_vertical_16_dspr2(s, p, blimit, limit, thresh); + 
aom_lpf_vertical_16_dspr2(s + 8 * p, p, blimit, limit, thresh); } #endif // #if HAVE_DSPR2 diff --git a/aom_dsp/mips/loopfilter_filters_dspr2.h b/aom_dsp/mips/loopfilter_filters_dspr2.h index 7e47d7b5b750521e060512c2e174917a3a24d0b6..aa86c074891ee4eebfa120ab62b57b71f374de85 100644 --- a/aom_dsp/mips/loopfilter_filters_dspr2.h +++ b/aom_dsp/mips/loopfilter_filters_dspr2.h @@ -14,9 +14,9 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" -#include "aom_mem/vpx_mem.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" +#include "aom_mem/aom_mem.h" #include "aom_ports/mem.h" #ifdef __cplusplus @@ -27,7 +27,7 @@ extern "C" { /* inputs & outputs are quad-byte vectors */ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1, uint32_t *ps0, uint32_t *qs0, uint32_t *qs1) { - int32_t vpx_filter_l, vpx_filter_r; + int32_t aom_filter_l, aom_filter_r; int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; int32_t subr_r, subr_l; uint32_t t1, t2, HWM, t3; @@ -73,33 +73,33 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1, hev_r = hev_r & HWM; __asm__ __volatile__( - /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */ - "subq_s.ph %[vpx_filter_l], %[vps1_l], %[vqs1_l] \n\t" - "subq_s.ph %[vpx_filter_r], %[vps1_r], %[vqs1_r] \n\t" + /* aom_filter = vp8_signed_char_clamp(ps1 - qs1); */ + "subq_s.ph %[aom_filter_l], %[vps1_l], %[vqs1_l] \n\t" + "subq_s.ph %[aom_filter_r], %[vps1_r], %[vqs1_r] \n\t" /* qs0 - ps0 */ "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t" "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t" - /* vpx_filter &= hev; */ - "and %[vpx_filter_l], %[vpx_filter_l], %[hev_l] \n\t" - "and %[vpx_filter_r], %[vpx_filter_r], %[hev_r] \n\t" + /* aom_filter &= hev; */ + "and %[aom_filter_l], %[aom_filter_l], %[hev_l] \n\t" + "and %[aom_filter_r], %[aom_filter_r], %[hev_r] \n\t" - /* vpx_filter = vp8_signed_char_clamp(vpx_filter + 3 * (qs0 - ps0)); */ - "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t" + /* aom_filter = vp8_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */ + "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t" + "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t" "xor %[invhev_l], %[hev_l], %[HWM] \n\t" - "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t" + "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t" + "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t" "xor %[invhev_r], %[hev_r], %[HWM] \n\t" - "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t" + "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t" + "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t" - /* vpx_filter &= mask; */ - "and %[vpx_filter_l], %[vpx_filter_l], %[mask_l] \n\t" - "and %[vpx_filter_r], %[vpx_filter_r], %[mask_r] \n\t" + /* aom_filter &= mask; */ + "and %[aom_filter_l], %[aom_filter_l], %[mask_l] \n\t" + "and %[aom_filter_r], %[aom_filter_r], %[mask_r] \n\t" - : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r), + : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r), [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r), [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r) : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l), @@ -110,13 +110,13 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, 
uint32_t *ps1, /* save bottom 3 bits so that we round one side +4 and the other +3 */ __asm__ __volatile__( - /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */ - "addq_s.ph %[Filter1_l], %[vpx_filter_l], %[t2] \n\t" - "addq_s.ph %[Filter1_r], %[vpx_filter_r], %[t2] \n\t" + /* Filter2 = vp8_signed_char_clamp(aom_filter + 3) >>= 3; */ + "addq_s.ph %[Filter1_l], %[aom_filter_l], %[t2] \n\t" + "addq_s.ph %[Filter1_r], %[aom_filter_r], %[t2] \n\t" - /* Filter1 = vp8_signed_char_clamp(vpx_filter + 4) >>= 3; */ - "addq_s.ph %[Filter2_l], %[vpx_filter_l], %[t1] \n\t" - "addq_s.ph %[Filter2_r], %[vpx_filter_r], %[t1] \n\t" + /* Filter1 = vp8_signed_char_clamp(aom_filter + 4) >>= 3; */ + "addq_s.ph %[Filter2_l], %[aom_filter_l], %[t1] \n\t" + "addq_s.ph %[Filter2_r], %[aom_filter_r], %[t1] \n\t" "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t" "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t" @@ -139,22 +139,22 @@ static INLINE void filter_dspr2(uint32_t mask, uint32_t hev, uint32_t *ps1, [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), [vqs0_r] "+r"(vqs0_r) : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM), - [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r)); + [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r)); __asm__ __volatile__( - /* (vpx_filter += 1) >>= 1 */ + /* (aom_filter += 1) >>= 1 */ "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t" "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t" - /* vpx_filter &= ~hev; */ + /* aom_filter &= ~hev; */ "and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t" "and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t" - /* vps1 = vp8_signed_char_clamp(ps1 + vpx_filter); */ + /* vps1 = vp8_signed_char_clamp(ps1 + aom_filter); */ "addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t" "addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t" - /* vqs1 = vp8_signed_char_clamp(qs1 - vpx_filter); */ + /* vqs1 = vp8_signed_char_clamp(qs1 - aom_filter); */ "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t" "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t" @@ -194,7 +194,7 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1, uint32_t ps0, uint32_t qs0, uint32_t qs1, uint32_t *p1_f0, uint32_t *p0_f0, uint32_t *q0_f0, uint32_t *q1_f0) { - int32_t vpx_filter_l, vpx_filter_r; + int32_t aom_filter_l, aom_filter_r; int32_t Filter1_l, Filter1_r, Filter2_l, Filter2_r; int32_t subr_r, subr_l; uint32_t t1, t2, HWM, t3; @@ -240,33 +240,33 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1, hev_r = hev_r & HWM; __asm__ __volatile__( - /* vpx_filter = vp8_signed_char_clamp(ps1 - qs1); */ - "subq_s.ph %[vpx_filter_l], %[vps1_l], %[vqs1_l] \n\t" - "subq_s.ph %[vpx_filter_r], %[vps1_r], %[vqs1_r] \n\t" + /* aom_filter = vp8_signed_char_clamp(ps1 - qs1); */ + "subq_s.ph %[aom_filter_l], %[vps1_l], %[vqs1_l] \n\t" + "subq_s.ph %[aom_filter_r], %[vps1_r], %[vqs1_r] \n\t" /* qs0 - ps0 */ "subq_s.ph %[subr_l], %[vqs0_l], %[vps0_l] \n\t" "subq_s.ph %[subr_r], %[vqs0_r], %[vps0_r] \n\t" - /* vpx_filter &= hev; */ - "and %[vpx_filter_l], %[vpx_filter_l], %[hev_l] \n\t" - "and %[vpx_filter_r], %[vpx_filter_r], %[hev_r] \n\t" + /* aom_filter &= hev; */ + "and %[aom_filter_l], %[aom_filter_l], %[hev_l] \n\t" + "and %[aom_filter_r], %[aom_filter_r], %[hev_r] \n\t" - /* vpx_filter = vp8_signed_char_clamp(vpx_filter + 3 * (qs0 - ps0)); */ - "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t" + /* aom_filter = 
vp8_signed_char_clamp(aom_filter + 3 * (qs0 - ps0)); */ + "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t" + "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t" "xor %[invhev_l], %[hev_l], %[HWM] \n\t" - "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t" + "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t" + "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t" "xor %[invhev_r], %[hev_r], %[HWM] \n\t" - "addq_s.ph %[vpx_filter_l], %[vpx_filter_l], %[subr_l] \n\t" - "addq_s.ph %[vpx_filter_r], %[vpx_filter_r], %[subr_r] \n\t" + "addq_s.ph %[aom_filter_l], %[aom_filter_l], %[subr_l] \n\t" + "addq_s.ph %[aom_filter_r], %[aom_filter_r], %[subr_r] \n\t" - /* vpx_filter &= mask; */ - "and %[vpx_filter_l], %[vpx_filter_l], %[mask_l] \n\t" - "and %[vpx_filter_r], %[vpx_filter_r], %[mask_r] \n\t" + /* aom_filter &= mask; */ + "and %[aom_filter_l], %[aom_filter_l], %[mask_l] \n\t" + "and %[aom_filter_r], %[aom_filter_r], %[mask_r] \n\t" - : [vpx_filter_l] "=&r"(vpx_filter_l), [vpx_filter_r] "=&r"(vpx_filter_r), + : [aom_filter_l] "=&r"(aom_filter_l), [aom_filter_r] "=&r"(aom_filter_r), [subr_l] "=&r"(subr_l), [subr_r] "=&r"(subr_r), [invhev_l] "=&r"(invhev_l), [invhev_r] "=&r"(invhev_r) : [vps0_l] "r"(vps0_l), [vps0_r] "r"(vps0_r), [vps1_l] "r"(vps1_l), @@ -277,13 +277,13 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1, /* save bottom 3 bits so that we round one side +4 and the other +3 */ __asm__ __volatile__( - /* Filter2 = vp8_signed_char_clamp(vpx_filter + 3) >>= 3; */ - "addq_s.ph %[Filter1_l], %[vpx_filter_l], %[t2] \n\t" - "addq_s.ph %[Filter1_r], %[vpx_filter_r], %[t2] \n\t" + /* Filter2 = vp8_signed_char_clamp(aom_filter + 3) >>= 3; */ + "addq_s.ph %[Filter1_l], %[aom_filter_l], %[t2] \n\t" + "addq_s.ph %[Filter1_r], %[aom_filter_r], %[t2] \n\t" - /* Filter1 = vp8_signed_char_clamp(vpx_filter + 4) >>= 3; */ - "addq_s.ph %[Filter2_l], %[vpx_filter_l], %[t1] \n\t" - "addq_s.ph %[Filter2_r], %[vpx_filter_r], %[t1] \n\t" + /* Filter1 = vp8_signed_char_clamp(aom_filter + 4) >>= 3; */ + "addq_s.ph %[Filter2_l], %[aom_filter_l], %[t1] \n\t" + "addq_s.ph %[Filter2_r], %[aom_filter_r], %[t1] \n\t" "shra.ph %[Filter1_r], %[Filter1_r], 3 \n\t" "shra.ph %[Filter1_l], %[Filter1_l], 3 \n\t" @@ -306,22 +306,22 @@ static INLINE void filter1_dspr2(uint32_t mask, uint32_t hev, uint32_t ps1, [vps0_l] "+r"(vps0_l), [vps0_r] "+r"(vps0_r), [vqs0_l] "+r"(vqs0_l), [vqs0_r] "+r"(vqs0_r) : [t1] "r"(t1), [t2] "r"(t2), [HWM] "r"(HWM), - [vpx_filter_l] "r"(vpx_filter_l), [vpx_filter_r] "r"(vpx_filter_r)); + [aom_filter_l] "r"(aom_filter_l), [aom_filter_r] "r"(aom_filter_r)); __asm__ __volatile__( - /* (vpx_filter += 1) >>= 1 */ + /* (aom_filter += 1) >>= 1 */ "addqh.ph %[Filter1_l], %[Filter1_l], %[t3] \n\t" "addqh.ph %[Filter1_r], %[Filter1_r], %[t3] \n\t" - /* vpx_filter &= ~hev; */ + /* aom_filter &= ~hev; */ "and %[Filter1_l], %[Filter1_l], %[invhev_l] \n\t" "and %[Filter1_r], %[Filter1_r], %[invhev_r] \n\t" - /* vps1 = vp8_signed_char_clamp(ps1 + vpx_filter); */ + /* vps1 = vp8_signed_char_clamp(ps1 + aom_filter); */ "addq_s.ph %[vps1_l], %[vps1_l], %[Filter1_l] \n\t" "addq_s.ph %[vps1_r], %[vps1_r], %[Filter1_r] \n\t" - /* vqs1 = vp8_signed_char_clamp(qs1 - vpx_filter); */ + /* vqs1 = vp8_signed_char_clamp(qs1 - aom_filter); */ "subq_s.ph %[vqs1_l], %[vqs1_l], %[Filter1_l] \n\t" "subq_s.ph %[vqs1_r], %[vqs1_r], %[Filter1_r] \n\t" diff --git 
a/aom_dsp/mips/loopfilter_macros_dspr2.h b/aom_dsp/mips/loopfilter_macros_dspr2.h index 3199db4bb0b1b4a80df868c5c8e4b47573594c01..c7b1c7f5297ae80d5ad117d6a4f50ecbb5901ac6 100644 --- a/aom_dsp/mips/loopfilter_macros_dspr2.h +++ b/aom_dsp/mips/loopfilter_macros_dspr2.h @@ -14,9 +14,9 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" -#include "aom_mem/vpx_mem.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" +#include "aom_mem/aom_mem.h" #ifdef __cplusplus extern "C" { diff --git a/aom_dsp/mips/loopfilter_masks_dspr2.h b/aom_dsp/mips/loopfilter_masks_dspr2.h index 852afc6754cae80ec6d146fe4ba3ece7adc2eb59..b283e184a8b6f529b5533691e0def008c0c1f236 100644 --- a/aom_dsp/mips/loopfilter_masks_dspr2.h +++ b/aom_dsp/mips/loopfilter_masks_dspr2.h @@ -14,9 +14,9 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" -#include "aom_mem/vpx_mem.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" +#include "aom_mem/aom_mem.h" #ifdef __cplusplus extern "C" { diff --git a/aom_dsp/mips/loopfilter_mb_dspr2.c b/aom_dsp/mips/loopfilter_mb_dspr2.c index 4003e4a056eabd6bbfb16274b4cade30096c12a2..a389ee77125f7f12d010a3ea860b360763f2939a 100644 --- a/aom_dsp/mips/loopfilter_mb_dspr2.c +++ b/aom_dsp/mips/loopfilter_mb_dspr2.c @@ -11,16 +11,16 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" #include "aom_dsp/mips/common_dspr2.h" #include "aom_dsp/mips/loopfilter_filters_dspr2.h" #include "aom_dsp/mips/loopfilter_macros_dspr2.h" #include "aom_dsp/mips/loopfilter_masks_dspr2.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #if HAVE_DSPR2 -void vpx_lpf_horizontal_8_dspr2(unsigned char *s, int pitch, +void aom_lpf_horizontal_8_dspr2(unsigned char *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { uint32_t mask; @@ -287,7 +287,7 @@ void vpx_lpf_horizontal_8_dspr2(unsigned char *s, int pitch, } } -void vpx_lpf_vertical_8_dspr2(unsigned char *s, int pitch, +void aom_lpf_vertical_8_dspr2(unsigned char *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { uint8_t i; diff --git a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c index 2d2c16332ab4b62f6417b57644e51b3046334172..a2a6fe4f2b16d2c5d3c23284eb78ca3f144f0251 100644 --- a/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c +++ b/aom_dsp/mips/loopfilter_mb_horiz_dspr2.c @@ -11,16 +11,16 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" #include "aom_dsp/mips/common_dspr2.h" #include "aom_dsp/mips/loopfilter_filters_dspr2.h" #include "aom_dsp/mips/loopfilter_macros_dspr2.h" #include "aom_dsp/mips/loopfilter_masks_dspr2.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #if HAVE_DSPR2 -void vpx_lpf_horizontal_16_dspr2(unsigned char *s, int pitch, +void aom_lpf_horizontal_16_dspr2(unsigned char *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count) { uint32_t mask; diff --git a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c index 567d5edce648330cd4661a700c3bff0e677ec2aa..28528869b4b1efac7307f2a446bef8db3aaf1191 100644 --- a/aom_dsp/mips/loopfilter_mb_vert_dspr2.c +++ b/aom_dsp/mips/loopfilter_mb_vert_dspr2.c @@ -11,16 +11,16 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include 
"aom/aom_integer.h" #include "aom_dsp/mips/common_dspr2.h" #include "aom_dsp/mips/loopfilter_filters_dspr2.h" #include "aom_dsp/mips/loopfilter_macros_dspr2.h" #include "aom_dsp/mips/loopfilter_masks_dspr2.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #if HAVE_DSPR2 -void vpx_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit, +void aom_lpf_vertical_16_dspr2(uint8_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { uint8_t i; uint32_t mask, hev, flat, flat2; diff --git a/aom_dsp/mips/macros_msa.h b/aom_dsp/mips/macros_msa.h index 1390dd3fca7a01f7f1682355eeab3c74f727c985..cbfa6a134ed8ba0243fd02e9f48286a6efe551f4 100644 --- a/aom_dsp/mips/macros_msa.h +++ b/aom_dsp/mips/macros_msa.h @@ -14,8 +14,8 @@ #include -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #define LD_B(RTYPE, psrc) *((const RTYPE *)(psrc)) #define LD_UB(...) LD_B(v16u8, __VA_ARGS__) diff --git a/aom_dsp/mips/sad_msa.c b/aom_dsp/mips/sad_msa.c index 4216cd43d64b9613e0475f9e9e2ad3697f24f861..3c21f3c59b0e216e8ebec376600a3bfec751feac 100644 --- a/aom_dsp/mips/sad_msa.c +++ b/aom_dsp/mips/sad_msa.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/macros_msa.h" #define SAD_INSVE_W4(RTYPE, in0, in1, in2, in3, out) \ @@ -1261,142 +1261,142 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, } #define VPX_SAD_4xHEIGHT_MSA(height) \ - uint32_t vpx_sad4x##height##_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad4x##height##_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride) { \ return sad_4width_msa(src, src_stride, ref, ref_stride, height); \ } #define VPX_SAD_8xHEIGHT_MSA(height) \ - uint32_t vpx_sad8x##height##_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad8x##height##_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride) { \ return sad_8width_msa(src, src_stride, ref, ref_stride, height); \ } #define VPX_SAD_16xHEIGHT_MSA(height) \ - uint32_t vpx_sad16x##height##_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad16x##height##_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride) { \ return sad_16width_msa(src, src_stride, ref, ref_stride, height); \ } #define VPX_SAD_32xHEIGHT_MSA(height) \ - uint32_t vpx_sad32x##height##_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad32x##height##_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride) { \ return sad_32width_msa(src, src_stride, ref, ref_stride, height); \ } #define VPX_SAD_64xHEIGHT_MSA(height) \ - uint32_t vpx_sad64x##height##_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad64x##height##_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride) { \ return sad_64width_msa(src, src_stride, ref, ref_stride, height); \ } #define VPX_SAD_4xHEIGHTx3_MSA(height) \ - void vpx_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad4x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_4width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_8xHEIGHTx3_MSA(height) \ - void vpx_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + void 
aom_sad8x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_8width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_16xHEIGHTx3_MSA(height) \ - void vpx_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad16x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_16width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_32xHEIGHTx3_MSA(height) \ - void vpx_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad32x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_32width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_64xHEIGHTx3_MSA(height) \ - void vpx_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad64x##height##x3_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_64width_x3_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_4xHEIGHTx8_MSA(height) \ - void vpx_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad4x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_4width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_8xHEIGHTx8_MSA(height) \ - void vpx_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad8x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_8width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_16xHEIGHTx8_MSA(height) \ - void vpx_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad16x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_16width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_32xHEIGHTx8_MSA(height) \ - void vpx_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad32x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_32width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_64xHEIGHTx8_MSA(height) \ - void vpx_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad64x##height##x8_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ uint32_t *sads) { \ sad_64width_x8_msa(src, src_stride, ref, ref_stride, height, sads); \ } #define VPX_SAD_4xHEIGHTx4D_MSA(height) \ - void vpx_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad4x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *const refs[], \ int32_t ref_stride, uint32_t *sads) { \ sad_4width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ } #define VPX_SAD_8xHEIGHTx4D_MSA(height) \ - void vpx_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad8x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *const refs[], \ int32_t ref_stride, uint32_t *sads) { \ sad_8width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ } #define VPX_SAD_16xHEIGHTx4D_MSA(height) \ - void 
vpx_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad16x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *const refs[], \ int32_t ref_stride, uint32_t *sads) { \ sad_16width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ } #define VPX_SAD_32xHEIGHTx4D_MSA(height) \ - void vpx_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad32x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *const refs[], \ int32_t ref_stride, uint32_t *sads) { \ sad_32width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ } #define VPX_SAD_64xHEIGHTx4D_MSA(height) \ - void vpx_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ + void aom_sad64x##height##x4d_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *const refs[], \ int32_t ref_stride, uint32_t *sads) { \ sad_64width_x4d_msa(src, src_stride, refs, ref_stride, height, sads); \ } #define VPX_AVGSAD_4xHEIGHT_MSA(height) \ - uint32_t vpx_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad4x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ const uint8_t *second_pred) { \ return avgsad_4width_msa(src, src_stride, ref, ref_stride, height, \ @@ -1404,7 +1404,7 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, } #define VPX_AVGSAD_8xHEIGHT_MSA(height) \ - uint32_t vpx_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ + uint32_t aom_sad8x##height##_avg_msa(const uint8_t *src, int32_t src_stride, \ const uint8_t *ref, int32_t ref_stride, \ const uint8_t *second_pred) { \ return avgsad_8width_msa(src, src_stride, ref, ref_stride, height, \ @@ -1412,7 +1412,7 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, } #define VPX_AVGSAD_16xHEIGHT_MSA(height) \ - uint32_t vpx_sad16x##height##_avg_msa( \ + uint32_t aom_sad16x##height##_avg_msa( \ const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ int32_t ref_stride, const uint8_t *second_pred) { \ return avgsad_16width_msa(src, src_stride, ref, ref_stride, height, \ @@ -1420,7 +1420,7 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, } #define VPX_AVGSAD_32xHEIGHT_MSA(height) \ - uint32_t vpx_sad32x##height##_avg_msa( \ + uint32_t aom_sad32x##height##_avg_msa( \ const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ int32_t ref_stride, const uint8_t *second_pred) { \ return avgsad_32width_msa(src, src_stride, ref, ref_stride, height, \ @@ -1428,7 +1428,7 @@ static uint32_t avgsad_64width_msa(const uint8_t *src, int32_t src_stride, } #define VPX_AVGSAD_64xHEIGHT_MSA(height) \ - uint32_t vpx_sad64x##height##_avg_msa( \ + uint32_t aom_sad64x##height##_avg_msa( \ const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ int32_t ref_stride, const uint8_t *second_pred) { \ return avgsad_64width_msa(src, src_stride, ref, ref_stride, height, \ diff --git a/aom_dsp/mips/sub_pixel_variance_msa.c b/aom_dsp/mips/sub_pixel_variance_msa.c index 5a45964bb2ca4fbd1d3d7433a0fbc5ab8afb3876..221ec497d22c107fc69a80422221b40b92b5211c 100644 --- a/aom_dsp/mips/sub_pixel_variance_msa.c +++ b/aom_dsp/mips/sub_pixel_variance_msa.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" #include "aom_dsp/mips/macros_msa.h" #include "aom_dsp/variance.h" @@ -1619,7 +1619,7 @@ static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa( #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12); #define VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(wd, ht) \ - uint32_t vpx_sub_pixel_variance##wd##x##ht##_msa( \ + uint32_t aom_sub_pixel_variance##wd##x##ht##_msa( \ const uint8_t *src, int32_t src_stride, int32_t xoffset, \ int32_t yoffset, const uint8_t *ref, int32_t ref_stride, \ uint32_t *sse) { \ @@ -1645,7 +1645,7 @@ static uint32_t sub_pixel_avg_sse_diff_64width_hv_msa( \ var = VARIANCE_##wd##Wx##ht##H(*sse, diff); \ } else { \ - var = vpx_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \ + var = aom_variance##wd##x##ht##_msa(src, src_stride, ref, ref_stride, \ sse); \ } \ } \ @@ -1674,7 +1674,7 @@ VPX_SUB_PIXEL_VARIANCE_WDXHT_MSA(64, 64) /* clang-format on */ #define VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(wd, ht) \ - uint32_t vpx_sub_pixel_avg_variance##wd##x##ht##_msa( \ + uint32_t aom_sub_pixel_avg_variance##wd##x##ht##_msa( \ const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \ int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \ uint32_t *sse, const uint8_t *sec_pred) { \ @@ -1722,7 +1722,7 @@ VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 16) VPX_SUB_PIXEL_AVG_VARIANCE_WDXHT_MSA(32, 32) /* clang-format on */ -uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr, +uint32_t aom_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, int32_t yoffset, const uint8_t *ref_ptr, @@ -1757,7 +1757,7 @@ uint32_t vpx_sub_pixel_avg_variance32x64_msa(const uint8_t *src_ptr, } #define VPX_SUB_PIXEL_AVG_VARIANCE64XHEIGHT_MSA(ht) \ - uint32_t vpx_sub_pixel_avg_variance64x##ht##_msa( \ + uint32_t aom_sub_pixel_avg_variance64x##ht##_msa( \ const uint8_t *src_ptr, int32_t src_stride, int32_t xoffset, \ int32_t yoffset, const uint8_t *ref_ptr, int32_t ref_stride, \ uint32_t *sse, const uint8_t *sec_pred) { \ diff --git a/aom_dsp/mips/subtract_msa.c b/aom_dsp/mips/subtract_msa.c index fff7cc19913e144ee8f11284b2846be367a15253..37b89765db2c0afeb24f8d11ed8ee131b1df88f3 100644 --- a/aom_dsp/mips/subtract_msa.c +++ b/aom_dsp/mips/subtract_msa.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/macros_msa.h" static void sub_blk_4x4_msa(const uint8_t *src_ptr, int32_t src_stride, @@ -227,7 +227,7 @@ static void sub_blk_64x64_msa(const uint8_t *src, int32_t src_stride, } } -void vpx_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr, +void aom_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr, ptrdiff_t diff_stride, const uint8_t *src_ptr, ptrdiff_t src_stride, const uint8_t *pred_ptr, ptrdiff_t pred_stride) { @@ -254,12 +254,12 @@ void vpx_subtract_block_msa(int32_t rows, int32_t cols, int16_t *diff_ptr, diff_stride); break; default: - vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, + aom_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride, pred_ptr, pred_stride); break; } } else { - vpx_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride, + aom_subtract_block_c(rows, cols, diff_ptr, diff_stride, src_ptr, src_stride, pred_ptr, pred_stride); } } diff --git a/aom_dsp/mips/variance_msa.c b/aom_dsp/mips/variance_msa.c index 518ea7f57ed86884c30497266dfef32dfb0957f9..e4c56e37aab2148300de20d10cb9dcb511c24198 100644 --- a/aom_dsp/mips/variance_msa.c +++ b/aom_dsp/mips/variance_msa.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/mips/macros_msa.h" #define CALC_MSE_B(src, ref, var) \ @@ -488,7 +488,7 @@ static uint32_t sse_64width_msa(const uint8_t *src_ptr, int32_t src_stride, return HADD_SW_S32(var); } -uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride, +uint32_t aom_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride, const uint8_t *ref_ptr, int32_t ref_stride) { uint32_t err = 0; uint32_t src0, src1, src2, src3; @@ -529,7 +529,7 @@ uint32_t vpx_get4x4sse_cs_msa(const uint8_t *src_ptr, int32_t src_stride, #define VARIANCE_64Wx64H(sse, diff) VARIANCE_LARGE_WxH(sse, diff, 12); #define VPX_VARIANCE_WDXHT_MSA(wd, ht) \ - uint32_t vpx_variance##wd##x##ht##_msa( \ + uint32_t aom_variance##wd##x##ht##_msa( \ const uint8_t *src, int32_t src_stride, const uint8_t *ref, \ int32_t ref_stride, uint32_t *sse) { \ int32_t diff; \ @@ -556,7 +556,7 @@ VPX_VARIANCE_WDXHT_MSA(32, 16) VPX_VARIANCE_WDXHT_MSA(32, 32) /* clang-format on */ -uint32_t vpx_variance32x64_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_variance32x64_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { int32_t diff; @@ -566,7 +566,7 @@ uint32_t vpx_variance32x64_msa(const uint8_t *src, int32_t src_stride, return VARIANCE_32Wx64H(*sse, diff); } -uint32_t vpx_variance64x32_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_variance64x32_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { int32_t diff; @@ -576,7 +576,7 @@ uint32_t vpx_variance64x32_msa(const uint8_t *src, int32_t src_stride, return VARIANCE_64Wx32H(*sse, diff); } -uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_variance64x64_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { int32_t diff; @@ -586,14 +586,14 @@ uint32_t vpx_variance64x64_msa(const uint8_t *src, int32_t src_stride, return VARIANCE_64Wx64H(*sse, diff); } -uint32_t vpx_mse8x8_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_mse8x8_msa(const uint8_t *src, int32_t src_stride, const 
uint8_t *ref, int32_t ref_stride, uint32_t *sse) { *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 8); return *sse; } -uint32_t vpx_mse8x16_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_mse8x16_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { *sse = sse_8width_msa(src, src_stride, ref, ref_stride, 16); @@ -601,7 +601,7 @@ uint32_t vpx_mse8x16_msa(const uint8_t *src, int32_t src_stride, return *sse; } -uint32_t vpx_mse16x8_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_mse16x8_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { *sse = sse_16width_msa(src, src_stride, ref, ref_stride, 8); @@ -609,7 +609,7 @@ uint32_t vpx_mse16x8_msa(const uint8_t *src, int32_t src_stride, return *sse; } -uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride, +uint32_t aom_mse16x16_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse) { *sse = sse_16width_msa(src, src_stride, ref, ref_stride, 16); @@ -617,16 +617,16 @@ uint32_t vpx_mse16x16_msa(const uint8_t *src, int32_t src_stride, return *sse; } -void vpx_get8x8var_msa(const uint8_t *src, int32_t src_stride, +void aom_get8x8var_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse, int32_t *sum) { *sse = sse_diff_8width_msa(src, src_stride, ref, ref_stride, 8, sum); } -void vpx_get16x16var_msa(const uint8_t *src, int32_t src_stride, +void aom_get16x16var_msa(const uint8_t *src, int32_t src_stride, const uint8_t *ref, int32_t ref_stride, uint32_t *sse, int32_t *sum) { *sse = sse_diff_16width_msa(src, src_stride, ref, ref_stride, 16, sum); } -uint32_t vpx_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); } +uint32_t aom_get_mb_ss_msa(const int16_t *src) { return get_mb_ss_msa(src); } diff --git a/aom_dsp/prob.c b/aom_dsp/prob.c index ac68be35e56e633df0811c5b76af46b2551faeb8..2ebe7f4d8a5a7727eef224e40d5069915beedb4b 100644 --- a/aom_dsp/prob.c +++ b/aom_dsp/prob.c @@ -11,7 +11,7 @@ #include "./prob.h" -const uint8_t vpx_norm[256] = { +const uint8_t aom_norm[256] = { 0, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, @@ -25,10 +25,10 @@ const uint8_t vpx_norm[256] = { }; static unsigned int tree_merge_probs_impl(unsigned int i, - const vpx_tree_index *tree, - const vpx_prob *pre_probs, + const aom_tree_index *tree, + const aom_prob *pre_probs, const unsigned int *counts, - vpx_prob *probs) { + aom_prob *probs) { const int l = tree[i]; const unsigned int left_count = (l <= 0) ? 
counts[-l] @@ -42,7 +42,7 @@ static unsigned int tree_merge_probs_impl(unsigned int i, return left_count + right_count; } -void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs, - const unsigned int *counts, vpx_prob *probs) { +void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs, + const unsigned int *counts, aom_prob *probs) { tree_merge_probs_impl(0, tree, pre_probs, counts, probs); } diff --git a/aom_dsp/prob.h b/aom_dsp/prob.h index 9edb953864df4fc43525f11896c02a28926047a3..dcb1f0efb8a5da486ccda0fbd2f6bfb61d65c29d 100644 --- a/aom_dsp/prob.h +++ b/aom_dsp/prob.h @@ -12,8 +12,8 @@ #ifndef VPX_DSP_PROB_H_ #define VPX_DSP_PROB_H_ -#include "./vpx_config.h" -#include "./vpx_dsp_common.h" +#include "./aom_config.h" +#include "./aom_dsp_common.h" #include "aom_ports/mem.h" @@ -21,50 +21,50 @@ extern "C" { #endif -typedef uint8_t vpx_prob; +typedef uint8_t aom_prob; #define MAX_PROB 255 -#define vpx_prob_half ((vpx_prob)128) +#define aom_prob_half ((aom_prob)128) -typedef int8_t vpx_tree_index; +typedef int8_t aom_tree_index; #define TREE_SIZE(leaf_count) (-2 + 2 * (leaf_count)) -#define vpx_complement(x) (255 - x) +#define aom_complement(x) (255 - x) #define MODE_MV_COUNT_SAT 20 /* We build coding trees compactly in arrays. - Each node of the tree is a pair of vpx_tree_indices. + Each node of the tree is a pair of aom_tree_indices. Array index often references a corresponding probability table. Index <= 0 means done encoding/decoding and value = -Index, Index > 0 means need another bit, specification at index. Nonnegative indices are always even; processing begins at node 0. */ -typedef const vpx_tree_index vpx_tree[]; +typedef const aom_tree_index aom_tree[]; -static INLINE vpx_prob clip_prob(int p) { +static INLINE aom_prob clip_prob(int p) { return (p > 255) ? 255 : (p < 1) ? 1 : p; } -static INLINE vpx_prob get_prob(int num, int den) { +static INLINE aom_prob get_prob(int num, int den) { return (den == 0) ? 128u : clip_prob(((int64_t)num * 256 + (den >> 1)) / den); } -static INLINE vpx_prob get_binary_prob(int n0, int n1) { +static INLINE aom_prob get_binary_prob(int n0, int n1) { return get_prob(n0, n0 + n1); } /* This function assumes prob1 and prob2 are already within [1,255] range. 
*/ -static INLINE vpx_prob weighted_prob(int prob1, int prob2, int factor) { +static INLINE aom_prob weighted_prob(int prob1, int prob2, int factor) { return ROUND_POWER_OF_TWO(prob1 * (256 - factor) + prob2 * factor, 8); } -static INLINE vpx_prob merge_probs(vpx_prob pre_prob, const unsigned int ct[2], +static INLINE aom_prob merge_probs(aom_prob pre_prob, const unsigned int ct[2], unsigned int count_sat, unsigned int max_update_factor) { - const vpx_prob prob = get_binary_prob(ct[0], ct[1]); + const aom_prob prob = get_binary_prob(ct[0], ct[1]); const unsigned int count = VPXMIN(ct[0] + ct[1], count_sat); const unsigned int factor = max_update_factor * count / count_sat; return weighted_prob(pre_prob, prob, factor); @@ -76,7 +76,7 @@ static const int count_to_update_factor[MODE_MV_COUNT_SAT + 1] = { 70, 76, 83, 89, 96, 102, 108, 115, 121, 128 }; -static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob, +static INLINE aom_prob mode_mv_merge_probs(aom_prob pre_prob, const unsigned int ct[2]) { const unsigned int den = ct[0] + ct[1]; if (den == 0) { @@ -84,16 +84,16 @@ static INLINE vpx_prob mode_mv_merge_probs(vpx_prob pre_prob, } else { const unsigned int count = VPXMIN(den, MODE_MV_COUNT_SAT); const unsigned int factor = count_to_update_factor[count]; - const vpx_prob prob = + const aom_prob prob = clip_prob(((int64_t)(ct[0]) * 256 + (den >> 1)) / den); return weighted_prob(pre_prob, prob, factor); } } -void vpx_tree_merge_probs(const vpx_tree_index *tree, const vpx_prob *pre_probs, - const unsigned int *counts, vpx_prob *probs); +void aom_tree_merge_probs(const aom_tree_index *tree, const aom_prob *pre_probs, + const unsigned int *counts, aom_prob *probs); -DECLARE_ALIGNED(16, extern const uint8_t, vpx_norm[256]); +DECLARE_ALIGNED(16, extern const uint8_t, aom_norm[256]); #ifdef __cplusplus } // extern "C" diff --git a/aom_dsp/psnrhvs.c b/aom_dsp/psnrhvs.c index b5d5d65bbb3bce96b827da9da5f90210898e95bb..95750b4516a7873775f6f360e91534ffcc0aff27 100644 --- a/aom_dsp/psnrhvs.c +++ b/aom_dsp/psnrhvs.c @@ -15,8 +15,8 @@ #include #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/ssim.h" #include "aom_ports/system_state.h" @@ -28,7 +28,7 @@ static void od_bin_fdct8x8(tran_low_t *y, int ystride, const int16_t *x, int xstride) { (void)xstride; - vpx_fdct8x8(x, y, ystride); + aom_fdct8x8(x, y, ystride); } /* Normalized inverse quantization matrix for 8x8 DCT at the point of @@ -202,13 +202,13 @@ static double calc_psnrhvs(const unsigned char *_src, int _systride, ret /= pixels; return ret; } -double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source, +double aom_psnrhvs(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *y_psnrhvs, double *u_psnrhvs, double *v_psnrhvs) { double psnrhvs; const double par = 1.0; const int step = 7; - vpx_clear_system_state(); + aom_clear_system_state(); *y_psnrhvs = calc_psnrhvs(source->y_buffer, source->y_stride, dest->y_buffer, dest->y_stride, par, source->y_crop_width, source->y_crop_height, step, csf_y); diff --git a/aom_dsp/quantize.c b/aom_dsp/quantize.c index 3ef6d05bc60b57abd07c813154fd1611c900ea3b..2a194c6bb4a4f8daf8f8c8c53559853d6509c9a3 100644 --- a/aom_dsp/quantize.c +++ b/aom_dsp/quantize.c @@ -10,10 +10,10 @@ */ #include "aom_dsp/quantize.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #if CONFIG_AOM_QM -void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, +void aom_quantize_dc(const tran_low_t 
*coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr, @@ -41,7 +41,7 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, +void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, @@ -69,7 +69,7 @@ void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, } #endif -void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr, @@ -100,7 +100,7 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, @@ -131,7 +131,7 @@ void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, } #endif -void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, @@ -193,7 +193,7 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, @@ -252,7 +252,7 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, } #endif -void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, @@ -317,7 +317,7 @@ void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_b_32x32_c( +void aom_highbd_quantize_b_32x32_c( const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, @@ -377,7 +377,7 @@ void vpx_highbd_quantize_b_32x32_c( } #endif #else -void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, +void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr) { @@ -401,7 +401,7 @@ void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, +void aom_highbd_quantize_dc(const 
tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, @@ -425,7 +425,7 @@ void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, } #endif -void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr) { @@ -451,7 +451,7 @@ void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, @@ -477,7 +477,7 @@ void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, } #endif -void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, @@ -528,7 +528,7 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, @@ -578,7 +578,7 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, } #endif -void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, @@ -633,7 +633,7 @@ void vpx_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_b_32x32_c( +void aom_highbd_quantize_b_32x32_c( const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, diff --git a/aom_dsp/quantize.h b/aom_dsp/quantize.h index 2b5d09756396a615778349d7f19c508a42604f86..cb941b5568a6e47424f193a7ac5dae9f9289bb87 100644 --- a/aom_dsp/quantize.h +++ b/aom_dsp/quantize.h @@ -12,25 +12,25 @@ #ifndef VPX_DSP_QUANTIZE_H_ #define VPX_DSP_QUANTIZE_H_ -#include "./vpx_config.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "./aom_config.h" +#include "aom_dsp/aom_dsp_common.h" #ifdef __cplusplus extern "C" { #endif #if CONFIG_AOM_QM -void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, +void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr); -void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t 
*qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr); -void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, @@ -39,18 +39,18 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *iscan, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr); #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, +void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr); -void vpx_highbd_quantize_dc_32x32( +void aom_highbd_quantize_dc_32x32( const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr); -void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, @@ -60,15 +60,15 @@ void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, const qm_val_t *qm_ptr, const qm_val_t *iqm_ptr); #endif #else -void vpx_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, +void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr); -void vpx_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr); -void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, @@ -76,18 +76,18 @@ void vpx_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan); #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, +void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr); -void vpx_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, +void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block, const int16_t *round_ptr, const int16_t quant_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t dequant_ptr, uint16_t *eob_ptr); -void vpx_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs, +void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, 
intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, diff --git a/aom_dsp/sad.c b/aom_dsp/sad.c index d26ffe902ef52a90107ca8f39e38143ea2dba561..a94876aca5b7b4f914e258f2ee26bf36201dddc6 100644 --- a/aom_dsp/sad.c +++ b/aom_dsp/sad.c @@ -11,10 +11,10 @@ #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" /* Sum the difference between every corresponding element of the buffers. */ @@ -32,7 +32,7 @@ static INLINE unsigned int sad(const uint8_t *a, int a_stride, const uint8_t *b, return sad; } -// TODO(johannkoenig): this moved to vpx_dsp, should be able to clean this up. +// TODO(johannkoenig): this moved to aom_dsp, should be able to clean this up. /* Remove dependency on vp9 variance function by duplicating vp9_comp_avg_pred. * The function averages every corresponding element of the buffers and stores * the value in a third buffer, comp_pred. @@ -74,11 +74,11 @@ static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8, #endif // CONFIG_VPX_HIGHBITDEPTH #define sadMxN(m, n) \ - unsigned int vpx_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ + unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride) { \ return sad(src, src_stride, ref, ref_stride, m, n); \ } \ - unsigned int vpx_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \ + unsigned int aom_sad##m##x##n##_avg_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride, \ const uint8_t *second_pred) { \ uint8_t comp_pred[m * n]; \ @@ -89,24 +89,24 @@ static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8, // depending on call sites, pass **ref_array to avoid & in subsequent call and // de-dup with 4D below. 
#define sadMxNxK(m, n, k) \ - void vpx_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \ + void aom_sad##m##x##n##x##k##_c(const uint8_t *src, int src_stride, \ const uint8_t *ref_array, int ref_stride, \ uint32_t *sad_array) { \ int i; \ for (i = 0; i < k; ++i) \ sad_array[i] = \ - vpx_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \ + aom_sad##m##x##n##_c(src, src_stride, &ref_array[i], ref_stride); \ } // This appears to be equivalent to the above when k == 4 and refs is const #define sadMxNx4D(m, n) \ - void vpx_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ + void aom_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ const uint8_t *const ref_array[], \ int ref_stride, uint32_t *sad_array) { \ int i; \ for (i = 0; i < 4; ++i) \ sad_array[i] = \ - vpx_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \ + aom_sad##m##x##n##_c(src, src_stride, ref_array[i], ref_stride); \ } /* clang-format off */ @@ -212,12 +212,12 @@ static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride, } #define highbd_sadMxN(m, n) \ - unsigned int vpx_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ + unsigned int aom_highbd_sad##m##x##n##_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, \ int ref_stride) { \ return highbd_sad(src, src_stride, ref, ref_stride, m, n); \ } \ - unsigned int vpx_highbd_sad##m##x##n##_avg_c( \ + unsigned int aom_highbd_sad##m##x##n##_avg_c( \ const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, \ const uint8_t *second_pred) { \ uint16_t comp_pred[m * n]; \ @@ -226,23 +226,23 @@ static INLINE unsigned int highbd_sadb(const uint8_t *a8, int a_stride, } #define highbd_sadMxNxK(m, n, k) \ - void vpx_highbd_sad##m##x##n##x##k##_c( \ + void aom_highbd_sad##m##x##n##x##k##_c( \ const uint8_t *src, int src_stride, const uint8_t *ref_array, \ int ref_stride, uint32_t *sad_array) { \ int i; \ for (i = 0; i < k; ++i) { \ - sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \ + sad_array[i] = aom_highbd_sad##m##x##n##_c(src, src_stride, \ &ref_array[i], ref_stride); \ } \ } #define highbd_sadMxNx4D(m, n) \ - void vpx_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ + void aom_highbd_sad##m##x##n##x4d_c(const uint8_t *src, int src_stride, \ const uint8_t *const ref_array[], \ int ref_stride, uint32_t *sad_array) { \ int i; \ for (i = 0; i < 4; ++i) { \ - sad_array[i] = vpx_highbd_sad##m##x##n##_c(src, src_stride, \ + sad_array[i] = aom_highbd_sad##m##x##n##_c(src, src_stride, \ ref_array[i], ref_stride); \ } \ } diff --git a/aom_dsp/ssim.c b/aom_dsp/ssim.c index e3f5d0dcd190d95505c6c359ae31bdb09b1b07a7..c2080709c9d8b5a06d21c958db6e9ebc9c290f9a 100644 --- a/aom_dsp/ssim.c +++ b/aom_dsp/ssim.c @@ -10,12 +10,12 @@ */ #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/ssim.h" #include "aom_ports/mem.h" #include "aom_ports/system_state.h" -void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp, +void aom_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr) { @@ -30,7 +30,7 @@ void vpx_ssim_parms_16x16_c(const uint8_t *s, int sp, const uint8_t *r, int rp, } } } -void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp, +void aom_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t 
*sum_sxr) { int i, j; @@ -46,7 +46,7 @@ void vpx_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r, +void aom_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr) { @@ -87,7 +87,7 @@ static double similarity(uint32_t sum_s, uint32_t sum_r, uint32_t sum_sq_s, static double ssim_8x8(const uint8_t *s, int sp, const uint8_t *r, int rp) { uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0; - vpx_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, + aom_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr); return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64); } @@ -97,7 +97,7 @@ static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r, int rp, unsigned int bd) { uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0; const int oshift = bd - 8; - vpx_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, + aom_highbd_ssim_parms_8x8(s, sp, r, rp, &sum_s, &sum_r, &sum_sq_s, &sum_sq_r, &sum_sxr); return similarity(sum_s >> oshift, sum_r >> oshift, sum_sq_s >> (2 * oshift), sum_sq_r >> (2 * oshift), sum_sxr >> (2 * oshift), 64); @@ -107,7 +107,7 @@ static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r, // We are using a 8x8 moving window with starting location of each 8x8 window // on the 4x4 pixel grid. Such arrangement allows the windows to overlap // block boundaries to penalize blocking artifacts. -static double vpx_ssim2(const uint8_t *img1, const uint8_t *img2, +static double aom_ssim2(const uint8_t *img1, const uint8_t *img2, int stride_img1, int stride_img2, int width, int height) { int i, j; @@ -128,7 +128,7 @@ static double vpx_ssim2(const uint8_t *img1, const uint8_t *img2, } #if CONFIG_VPX_HIGHBITDEPTH -static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2, +static double aom_highbd_ssim2(const uint8_t *img1, const uint8_t *img2, int stride_img1, int stride_img2, int width, int height, unsigned int bd) { int i, j; @@ -151,18 +151,18 @@ static double vpx_highbd_ssim2(const uint8_t *img1, const uint8_t *img2, } #endif // CONFIG_VPX_HIGHBITDEPTH -double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source, +double aom_calc_ssim(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *weight) { double a, b, c; double ssimv; - a = vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, + a = aom_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, dest->y_stride, source->y_crop_width, source->y_crop_height); - b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, + b = aom_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height); - c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, + c = aom_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height); ssimv = a * .8 + .1 * (b + c); @@ -172,19 +172,19 @@ double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source, return ssimv; } -double vpx_calc_ssimg(const YV12_BUFFER_CONFIG *source, +double aom_calc_ssimg(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v) { double ssim_all = 0; double a, b, c; - a = 
vpx_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, + a = aom_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, dest->y_stride, source->y_crop_width, source->y_crop_height); - b = vpx_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, + b = aom_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height); - c = vpx_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, + c = aom_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height); *ssim_y = a; *ssim_u = b; @@ -272,11 +272,11 @@ static double ssimv_similarity2(const Ssimv *sv, int64_t n) { } static void ssimv_parms(uint8_t *img1, int img1_pitch, uint8_t *img2, int img2_pitch, Ssimv *sv) { - vpx_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r, + aom_ssim_parms_8x8(img1, img1_pitch, img2, img2_pitch, &sv->sum_s, &sv->sum_r, &sv->sum_sq_s, &sv->sum_sq_r, &sv->sum_sxr); } -double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, +double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, int img2_pitch, int width, int height, Ssimv *sv2, Metrics *m, int do_inconsistency) { double dssim_total = 0; @@ -287,7 +287,7 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, int c = 0; double norm; double old_ssim_total = 0; - vpx_clear_system_state(); + aom_clear_system_state(); // We can sample points as frequently as we like start with 1 per 4x4. for (i = 0; i < height; i += 4, img1 += img1_pitch * 4, img2 += img2_pitch * 4) { @@ -437,21 +437,21 @@ double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, } #if CONFIG_VPX_HIGHBITDEPTH -double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, +double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *weight, unsigned int bd) { double a, b, c; double ssimv; - a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, + a = aom_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, dest->y_stride, source->y_crop_width, source->y_crop_height, bd); - b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, + b = aom_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height, bd); - c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, + c = aom_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height, bd); @@ -462,21 +462,21 @@ double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, return ssimv; } -double vpx_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source, +double aom_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v, unsigned int bd) { double ssim_all = 0; double a, b, c; - a = vpx_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, + a = aom_highbd_ssim2(source->y_buffer, dest->y_buffer, source->y_stride, dest->y_stride, source->y_crop_width, source->y_crop_height, bd); - b = vpx_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, + b = aom_highbd_ssim2(source->u_buffer, dest->u_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height, bd); - c = vpx_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, + c = 
aom_highbd_ssim2(source->v_buffer, dest->v_buffer, source->uv_stride, dest->uv_stride, source->uv_crop_width, source->uv_crop_height, bd); *ssim_y = a; diff --git a/aom_dsp/ssim.h b/aom_dsp/ssim.h index 380211f7548489231a6dc996a8fdf322515647b9..afe9d9ac6a40a8486ba70a9ccaa86752e473defc 100644 --- a/aom_dsp/ssim.h +++ b/aom_dsp/ssim.h @@ -16,7 +16,7 @@ extern "C" { #endif -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_scale/yv12config.h" // metrics used for calculating ssim, ssim2, dssim, and ssimc @@ -61,31 +61,31 @@ typedef struct { double ssimcd; } Metrics; -double vpx_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, +double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2, int img2_pitch, int width, int height, Ssimv *sv2, Metrics *m, int do_inconsistency); -double vpx_calc_ssim(const YV12_BUFFER_CONFIG *source, +double aom_calc_ssim(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *weight); -double vpx_calc_ssimg(const YV12_BUFFER_CONFIG *source, +double aom_calc_ssimg(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v); -double vpx_calc_fastssim(const YV12_BUFFER_CONFIG *source, +double aom_calc_fastssim(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v); -double vpx_psnrhvs(const YV12_BUFFER_CONFIG *source, +double aom_psnrhvs(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v); #if CONFIG_VPX_HIGHBITDEPTH -double vpx_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, +double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *weight, unsigned int bd); -double vpx_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source, +double aom_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source, const YV12_BUFFER_CONFIG *dest, double *ssim_y, double *ssim_u, double *ssim_v, unsigned int bd); #endif // CONFIG_VPX_HIGHBITDEPTH diff --git a/aom_dsp/subtract.c b/aom_dsp/subtract.c index 871549a504e55f3cb69e4e33a3f5c4161bdd7583..3890d46bed8e319826bbbf6409ea3a906c6a5575 100644 --- a/aom_dsp/subtract.c +++ b/aom_dsp/subtract.c @@ -11,13 +11,13 @@ #include -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" -void vpx_subtract_block_c(int rows, int cols, int16_t *diff, +void aom_subtract_block_c(int rows, int cols, int16_t *diff, ptrdiff_t diff_stride, const uint8_t *src, ptrdiff_t src_stride, const uint8_t *pred, ptrdiff_t pred_stride) { @@ -33,7 +33,7 @@ void vpx_subtract_block_c(int rows, int cols, int16_t *diff, } #if CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_subtract_block_c(int rows, int cols, int16_t *diff, +void aom_highbd_subtract_block_c(int rows, int cols, int16_t *diff, ptrdiff_t diff_stride, const uint8_t *src8, ptrdiff_t src_stride, const uint8_t *pred8, ptrdiff_t pred_stride, int bd) { diff --git a/aom_dsp/txfm_common.h b/aom_dsp/txfm_common.h index a027333ead0782d5f8e2f272ea9e26906f8f040f..6fda5b5a4d71f87236406602c39b1651bb280b4f 100644 --- a/aom_dsp/txfm_common.h +++ b/aom_dsp/txfm_common.h @@ -12,7 +12,7 @@ #ifndef VPX_DSP_TXFM_COMMON_H_ #define VPX_DSP_TXFM_COMMON_H_ -#include "aom_dsp/vpx_dsp_common.h" +#include "aom_dsp/aom_dsp_common.h" // Constants and Macros used by all idct/dct functions #define DCT_CONST_BITS 14 diff --git 
a/aom_dsp/variance.c b/aom_dsp/variance.c index 4eb963c25d6b15265708ae6e72371fcc94164c79..33675383c49ffe40798e9f85a4b2f96d212fab3c 100644 --- a/aom_dsp/variance.c +++ b/aom_dsp/variance.c @@ -9,11 +9,11 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #include "aom_dsp/variance.h" @@ -22,7 +22,7 @@ static const uint8_t bilinear_filters[8][2] = { { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 }, }; -uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b, +uint32_t aom_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride) { int distortion = 0; int r, c; @@ -40,7 +40,7 @@ uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b, return distortion; } -uint32_t vpx_get_mb_ss_c(const int16_t *a) { +uint32_t aom_get_mb_ss_c(const int16_t *a) { unsigned int i, sum = 0; for (i = 0; i < 256; ++i) { @@ -50,22 +50,22 @@ uint32_t vpx_get_mb_ss_c(const int16_t *a) { return sum; } -uint32_t vpx_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride, +uint32_t aom_variance_halfpixvar16x16_h_c(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, uint32_t *sse) { - return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse); + return aom_sub_pixel_variance16x16_c(a, a_stride, 4, 0, b, b_stride, sse); } -uint32_t vpx_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride, +uint32_t aom_variance_halfpixvar16x16_v_c(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, uint32_t *sse) { - return vpx_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse); + return aom_sub_pixel_variance16x16_c(a, a_stride, 0, 4, b, b_stride, sse); } -uint32_t vpx_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride, +uint32_t aom_variance_halfpixvar16x16_hv_c(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, uint32_t *sse) { - return vpx_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse); + return aom_sub_pixel_variance16x16_c(a, a_stride, 4, 4, b, b_stride, sse); } static void variance(const uint8_t *a, int a_stride, const uint8_t *b, @@ -146,7 +146,7 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, } #define VAR(W, H) \ - uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + uint32_t aom_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ const uint8_t *b, int b_stride, \ uint32_t *sse) { \ int sum; \ @@ -155,7 +155,7 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, } #define SUBPIX_VAR(W, H) \ - uint32_t vpx_sub_pixel_variance##W##x##H##_c( \ + uint32_t aom_sub_pixel_variance##W##x##H##_c( \ const uint8_t *a, int a_stride, int xoffset, int yoffset, \ const uint8_t *b, int b_stride, uint32_t *sse) { \ uint16_t fdata3[(H + 1) * W]; \ @@ -166,11 +166,11 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \ + return aom_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \ } #define SUBPIX_AVG_VAR(W, H) \ - uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c( \ + uint32_t aom_sub_pixel_avg_variance##W##x##H##_c( \ const uint8_t *a, int a_stride, int xoffset, int yoffset, \ const uint8_t *b, int 
b_stride, uint32_t *sse, \ const uint8_t *second_pred) { \ @@ -183,9 +183,9 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \ + aom_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \ \ - return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \ + return aom_variance##W##x##H##_c(temp3, W, b, b_stride, sse); \ } /* Identical to the variance call except it takes an additional parameter, sum, @@ -193,7 +193,7 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, * sse - sum^2 / w*h */ #define GET_VAR(W, H) \ - void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride, \ + void aom_get##W##x##H##var_c(const uint8_t *a, int a_stride, \ const uint8_t *b, int b_stride, uint32_t *sse, \ int *sum) { \ variance(a, a_stride, b, b_stride, W, H, sse, sum); \ @@ -204,7 +204,7 @@ static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b, * variable. */ #define MSE(W, H) \ - uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \ + uint32_t aom_mse##W##x##H##_c(const uint8_t *a, int a_stride, \ const uint8_t *b, int b_stride, \ uint32_t *sse) { \ int sum; \ @@ -242,7 +242,7 @@ MSE(8, 16) MSE(8, 8) /* clang-format on */ -void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, +void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride) { int i, j; @@ -310,7 +310,7 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride, } #define HIGHBD_VAR(W, H) \ - uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + uint32_t aom_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ const uint8_t *b, int b_stride, \ uint32_t *sse) { \ int sum; \ @@ -318,7 +318,7 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride, return *sse - (((int64_t)sum * sum) / (W * H)); \ } \ \ - uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + uint32_t aom_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ const uint8_t *b, int b_stride, \ uint32_t *sse) { \ int sum; \ @@ -326,7 +326,7 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride, return *sse - (((int64_t)sum * sum) / (W * H)); \ } \ \ - uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ + uint32_t aom_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \ const uint8_t *b, int b_stride, \ uint32_t *sse) { \ int sum; \ @@ -335,26 +335,26 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride, } #define HIGHBD_GET_VAR(S) \ - void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ + void aom_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride, \ uint32_t *sse, int *sum) { \ highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ } \ \ - void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ + void aom_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride, \ uint32_t *sse, int *sum) { \ highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ } \ \ - void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ + void aom_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, 
int ref_stride, \ uint32_t *sse, int *sum) { \ highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum); \ } #define HIGHBD_MSE(W, H) \ - uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ + uint32_t aom_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride, \ uint32_t *sse) { \ int sum; \ @@ -362,7 +362,7 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride, return *sse; \ } \ \ - uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ + uint32_t aom_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride, \ uint32_t *sse) { \ int sum; \ @@ -370,7 +370,7 @@ static void highbd_12_variance(const uint8_t *a8, int a_stride, return *sse; \ } \ \ - uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ + uint32_t aom_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \ const uint8_t *ref, int ref_stride, \ uint32_t *sse) { \ int sum; \ @@ -421,7 +421,7 @@ static void highbd_var_filter_block2d_bil_second_pass( } #define HIGHBD_SUBPIX_VAR(W, H) \ - uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \ + uint32_t aom_highbd_8_sub_pixel_variance##W##x##H##_c( \ const uint8_t *src, int src_stride, int xoffset, int yoffset, \ const uint8_t *dst, int dst_stride, uint32_t *sse) { \ uint16_t fdata3[(H + 1) * W]; \ @@ -432,11 +432,11 @@ static void highbd_var_filter_block2d_bil_second_pass( highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ + return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ dst, dst_stride, sse); \ } \ \ - uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \ + uint32_t aom_highbd_10_sub_pixel_variance##W##x##H##_c( \ const uint8_t *src, int src_stride, int xoffset, int yoffset, \ const uint8_t *dst, int dst_stride, uint32_t *sse) { \ uint16_t fdata3[(H + 1) * W]; \ @@ -447,11 +447,11 @@ static void highbd_var_filter_block2d_bil_second_pass( highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ + return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ dst, dst_stride, sse); \ } \ \ - uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \ + uint32_t aom_highbd_12_sub_pixel_variance##W##x##H##_c( \ const uint8_t *src, int src_stride, int xoffset, int yoffset, \ const uint8_t *dst, int dst_stride, uint32_t *sse) { \ uint16_t fdata3[(H + 1) * W]; \ @@ -462,12 +462,12 @@ static void highbd_var_filter_block2d_bil_second_pass( highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ + return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \ dst, dst_stride, sse); \ } #define HIGHBD_SUBPIX_AVG_VAR(W, H) \ - uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \ + uint32_t aom_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \ const uint8_t *src, int src_stride, int xoffset, int yoffset, \ const uint8_t *dst, int dst_stride, uint32_t *sse, \ const uint8_t *second_pred) { \ @@ -480,14 +480,14 @@ static void highbd_var_filter_block2d_bil_second_pass( highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - 
vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ + aom_highbd_comp_avg_pred(temp3, second_pred, W, H, \ CONVERT_TO_BYTEPTR(temp2), W); \ \ - return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ + return aom_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ dst, dst_stride, sse); \ } \ \ - uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \ + uint32_t aom_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \ const uint8_t *src, int src_stride, int xoffset, int yoffset, \ const uint8_t *dst, int dst_stride, uint32_t *sse, \ const uint8_t *second_pred) { \ @@ -500,14 +500,14 @@ static void highbd_var_filter_block2d_bil_second_pass( highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ + aom_highbd_comp_avg_pred(temp3, second_pred, W, H, \ CONVERT_TO_BYTEPTR(temp2), W); \ \ - return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ + return aom_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ dst, dst_stride, sse); \ } \ \ - uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \ + uint32_t aom_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \ const uint8_t *src, int src_stride, int xoffset, int yoffset, \ const uint8_t *dst, int dst_stride, uint32_t *sse, \ const uint8_t *second_pred) { \ @@ -520,10 +520,10 @@ static void highbd_var_filter_block2d_bil_second_pass( highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \ bilinear_filters[yoffset]); \ \ - vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \ + aom_highbd_comp_avg_pred(temp3, second_pred, W, H, \ CONVERT_TO_BYTEPTR(temp2), W); \ \ - return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ + return aom_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \ dst, dst_stride, sse); \ } @@ -557,7 +557,7 @@ HIGHBD_MSE(8, 16) HIGHBD_MSE(8, 8) /* clang-format on */ -void vpx_highbd_comp_avg_pred(uint16_t *comp_pred, const uint8_t *pred8, +void aom_highbd_comp_avg_pred(uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride) { int i, j; diff --git a/aom_dsp/variance.h b/aom_dsp/variance.h index 8b1e1d19d9aed3d74d3da97bc1c4b747fb7dfe0d..81966fc6ba19a36eb5ecde6926e7715297f1697c 100644 --- a/aom_dsp/variance.h +++ b/aom_dsp/variance.h @@ -12,9 +12,9 @@ #ifndef VPX_DSP_VARIANCE_H_ #define VPX_DSP_VARIANCE_H_ -#include "./vpx_config.h" +#include "./aom_config.h" -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #ifdef __cplusplus extern "C" { @@ -23,48 +23,48 @@ extern "C" { #define FILTER_BITS 7 #define FILTER_WEIGHT 128 -typedef unsigned int (*vpx_sad_fn_t)(const uint8_t *a, int a_stride, +typedef unsigned int (*aom_sad_fn_t)(const uint8_t *a, int a_stride, const uint8_t *b_ptr, int b_stride); -typedef unsigned int (*vpx_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride, +typedef unsigned int (*aom_sad_avg_fn_t)(const uint8_t *a_ptr, int a_stride, const uint8_t *b_ptr, int b_stride, const uint8_t *second_pred); typedef void (*vp8_copy32xn_fn_t)(const uint8_t *a, int a_stride, uint8_t *b, int b_stride, int n); -typedef void (*vpx_sad_multi_fn_t)(const uint8_t *a, int a_stride, +typedef void (*aom_sad_multi_fn_t)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sad_array); -typedef void (*vpx_sad_multi_d_fn_t)(const uint8_t *a, int a_stride, +typedef void (*aom_sad_multi_d_fn_t)(const uint8_t *a, int a_stride, const uint8_t *const 
b_array[], int b_stride, unsigned int *sad_array); -typedef unsigned int (*vpx_variance_fn_t)(const uint8_t *a, int a_stride, +typedef unsigned int (*aom_variance_fn_t)(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse); -typedef unsigned int (*vpx_subpixvariance_fn_t)(const uint8_t *a, int a_stride, +typedef unsigned int (*aom_subpixvariance_fn_t)(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, unsigned int *sse); -typedef unsigned int (*vpx_subp_avg_variance_fn_t)( +typedef unsigned int (*aom_subp_avg_variance_fn_t)( const uint8_t *a_ptr, int a_stride, int xoffset, int yoffset, const uint8_t *b_ptr, int b_stride, unsigned int *sse, const uint8_t *second_pred); #if CONFIG_VP10 -typedef struct vpx_variance_vtable { - vpx_sad_fn_t sdf; - vpx_sad_avg_fn_t sdaf; - vpx_variance_fn_t vf; - vpx_subpixvariance_fn_t svf; - vpx_subp_avg_variance_fn_t svaf; - vpx_sad_multi_fn_t sdx3f; - vpx_sad_multi_fn_t sdx8f; - vpx_sad_multi_d_fn_t sdx4df; -} vpx_variance_fn_ptr_t; +typedef struct aom_variance_vtable { + aom_sad_fn_t sdf; + aom_sad_avg_fn_t sdaf; + aom_variance_fn_t vf; + aom_subpixvariance_fn_t svf; + aom_subp_avg_variance_fn_t svaf; + aom_sad_multi_fn_t sdx3f; + aom_sad_multi_fn_t sdx8f; + aom_sad_multi_d_fn_t sdx4df; +} aom_variance_fn_ptr_t; #endif // CONFIG_VP10 #ifdef __cplusplus diff --git a/aom_dsp/vpx_dsp_rtcd_defs.pl b/aom_dsp/vpx_dsp_rtcd_defs.pl deleted file mode 100644 index c9c8cf705ed72943b856d2e94f8c742d232f94b8..0000000000000000000000000000000000000000 --- a/aom_dsp/vpx_dsp_rtcd_defs.pl +++ /dev/null @@ -1,1919 +0,0 @@ -sub vpx_dsp_forward_decls() { -print < -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/x86/convolve.h" #include "aom_ports/mem.h" @@ -63,7 +63,7 @@ DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = { #define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x) #endif // __clang__ -static void vpx_filter_block1d16_h8_avx2( +static void aom_filter_block1d16_h8_avx2( const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr, ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) { __m128i filtersReg; @@ -291,7 +291,7 @@ static void vpx_filter_block1d16_h8_avx2( } } -static void vpx_filter_block1d16_v8_avx2( +static void aom_filter_block1d16_v8_avx2( const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr, ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) { __m128i filtersReg; @@ -527,41 +527,41 @@ static void vpx_filter_block1d16_v8_avx2( } #if HAVE_AVX2 && HAVE_SSSE3 -filter8_1dfunction vpx_filter_block1d4_v8_ssse3; +filter8_1dfunction aom_filter_block1d4_v8_ssse3; #if ARCH_X86_64 -filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3; -filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3; -filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3; -#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_intrin_ssse3 -#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_intrin_ssse3 -#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_intrin_ssse3 +filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3; +filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3; +filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3; +#define aom_filter_block1d8_v8_avx2 aom_filter_block1d8_v8_intrin_ssse3 +#define aom_filter_block1d8_h8_avx2 aom_filter_block1d8_h8_intrin_ssse3 +#define aom_filter_block1d4_h8_avx2 aom_filter_block1d4_h8_intrin_ssse3 #else // ARCH_X86 -filter8_1dfunction 
vpx_filter_block1d8_v8_ssse3; -filter8_1dfunction vpx_filter_block1d8_h8_ssse3; -filter8_1dfunction vpx_filter_block1d4_h8_ssse3; -#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_ssse3 -#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_ssse3 -#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_ssse3 +filter8_1dfunction aom_filter_block1d8_v8_ssse3; +filter8_1dfunction aom_filter_block1d8_h8_ssse3; +filter8_1dfunction aom_filter_block1d4_h8_ssse3; +#define aom_filter_block1d8_v8_avx2 aom_filter_block1d8_v8_ssse3 +#define aom_filter_block1d8_h8_avx2 aom_filter_block1d8_h8_ssse3 +#define aom_filter_block1d4_h8_avx2 aom_filter_block1d4_h8_ssse3 #endif // ARCH_X86_64 -filter8_1dfunction vpx_filter_block1d16_v2_ssse3; -filter8_1dfunction vpx_filter_block1d16_h2_ssse3; -filter8_1dfunction vpx_filter_block1d8_v2_ssse3; -filter8_1dfunction vpx_filter_block1d8_h2_ssse3; -filter8_1dfunction vpx_filter_block1d4_v2_ssse3; -filter8_1dfunction vpx_filter_block1d4_h2_ssse3; -#define vpx_filter_block1d4_v8_avx2 vpx_filter_block1d4_v8_ssse3 -#define vpx_filter_block1d16_v2_avx2 vpx_filter_block1d16_v2_ssse3 -#define vpx_filter_block1d16_h2_avx2 vpx_filter_block1d16_h2_ssse3 -#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3 -#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3 -#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3 -#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3 -// void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride, +filter8_1dfunction aom_filter_block1d16_v2_ssse3; +filter8_1dfunction aom_filter_block1d16_h2_ssse3; +filter8_1dfunction aom_filter_block1d8_v2_ssse3; +filter8_1dfunction aom_filter_block1d8_h2_ssse3; +filter8_1dfunction aom_filter_block1d4_v2_ssse3; +filter8_1dfunction aom_filter_block1d4_h2_ssse3; +#define aom_filter_block1d4_v8_avx2 aom_filter_block1d4_v8_ssse3 +#define aom_filter_block1d16_v2_avx2 aom_filter_block1d16_v2_ssse3 +#define aom_filter_block1d16_h2_avx2 aom_filter_block1d16_h2_ssse3 +#define aom_filter_block1d8_v2_avx2 aom_filter_block1d8_v2_ssse3 +#define aom_filter_block1d8_h2_avx2 aom_filter_block1d8_h2_ssse3 +#define aom_filter_block1d4_v2_avx2 aom_filter_block1d4_v2_ssse3 +#define aom_filter_block1d4_h2_avx2 aom_filter_block1d4_h2_ssse3 +// void aom_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, // int w, int h); -// void vpx_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, @@ -569,7 +569,7 @@ filter8_1dfunction vpx_filter_block1d4_h2_ssse3; FUN_CONV_1D(horiz, x_step_q4, filter_x, h, src, , avx2); FUN_CONV_1D(vert, y_step_q4, filter_y, v, src - src_stride * 3, , avx2); -// void vpx_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, diff --git a/aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c similarity index 94% rename from aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c rename to aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c index 
06e563db90d619b4b91c61060cd422c2a67f624b..c02e09dbc440483a79b5c2c30055e176957eeff9 100644 --- a/aom_dsp/x86/vpx_subpixel_8t_intrin_ssse3.c +++ b/aom_dsp/x86/aom_subpixel_8t_intrin_ssse3.c @@ -15,10 +15,10 @@ #include -#include "./vpx_dsp_rtcd.h" -#include "aom_dsp/vpx_filter.h" +#include "./aom_dsp_rtcd.h" +#include "aom_dsp/aom_filter.h" #include "aom_dsp/x86/convolve.h" -#include "aom_mem/vpx_mem.h" +#include "aom_mem/aom_mem.h" #include "aom_ports/mem.h" #include "aom_ports/emmintrin_compat.h" @@ -49,11 +49,11 @@ DECLARE_ALIGNED(16, static const uint8_t, filt4_global[16]) = { }; // These are reused by the avx2 intrinsics. -filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3; -filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3; -filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3; +filter8_1dfunction aom_filter_block1d8_v8_intrin_ssse3; +filter8_1dfunction aom_filter_block1d8_h8_intrin_ssse3; +filter8_1dfunction aom_filter_block1d4_h8_intrin_ssse3; -void vpx_filter_block1d4_h8_intrin_ssse3( +void aom_filter_block1d4_h8_intrin_ssse3( const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr, ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) { __m128i firstFilters, secondFilters, shuffle1, shuffle2; @@ -121,7 +121,7 @@ void vpx_filter_block1d4_h8_intrin_ssse3( } } -void vpx_filter_block1d8_h8_intrin_ssse3( +void aom_filter_block1d8_h8_intrin_ssse3( const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr, ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter) { __m128i firstFilters, secondFilters, thirdFilters, forthFilters, srcReg; @@ -198,7 +198,7 @@ void vpx_filter_block1d8_h8_intrin_ssse3( } } -void vpx_filter_block1d8_v8_intrin_ssse3( +void aom_filter_block1d8_v8_intrin_ssse3( const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr, ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter) { __m128i addFilterReg64, filtersReg, minReg; @@ -283,48 +283,48 @@ void vpx_filter_block1d8_v8_intrin_ssse3( } } -filter8_1dfunction vpx_filter_block1d16_v8_ssse3; -filter8_1dfunction vpx_filter_block1d16_h8_ssse3; -filter8_1dfunction vpx_filter_block1d8_v8_ssse3; -filter8_1dfunction vpx_filter_block1d8_h8_ssse3; -filter8_1dfunction vpx_filter_block1d4_v8_ssse3; -filter8_1dfunction vpx_filter_block1d4_h8_ssse3; -filter8_1dfunction vpx_filter_block1d16_v8_avg_ssse3; -filter8_1dfunction vpx_filter_block1d16_h8_avg_ssse3; -filter8_1dfunction vpx_filter_block1d8_v8_avg_ssse3; -filter8_1dfunction vpx_filter_block1d8_h8_avg_ssse3; -filter8_1dfunction vpx_filter_block1d4_v8_avg_ssse3; -filter8_1dfunction vpx_filter_block1d4_h8_avg_ssse3; - -filter8_1dfunction vpx_filter_block1d16_v2_ssse3; -filter8_1dfunction vpx_filter_block1d16_h2_ssse3; -filter8_1dfunction vpx_filter_block1d8_v2_ssse3; -filter8_1dfunction vpx_filter_block1d8_h2_ssse3; -filter8_1dfunction vpx_filter_block1d4_v2_ssse3; -filter8_1dfunction vpx_filter_block1d4_h2_ssse3; -filter8_1dfunction vpx_filter_block1d16_v2_avg_ssse3; -filter8_1dfunction vpx_filter_block1d16_h2_avg_ssse3; -filter8_1dfunction vpx_filter_block1d8_v2_avg_ssse3; -filter8_1dfunction vpx_filter_block1d8_h2_avg_ssse3; -filter8_1dfunction vpx_filter_block1d4_v2_avg_ssse3; -filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3; - -// void vpx_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, +filter8_1dfunction aom_filter_block1d16_v8_ssse3; +filter8_1dfunction aom_filter_block1d16_h8_ssse3; +filter8_1dfunction aom_filter_block1d8_v8_ssse3; +filter8_1dfunction 
aom_filter_block1d8_h8_ssse3; +filter8_1dfunction aom_filter_block1d4_v8_ssse3; +filter8_1dfunction aom_filter_block1d4_h8_ssse3; +filter8_1dfunction aom_filter_block1d16_v8_avg_ssse3; +filter8_1dfunction aom_filter_block1d16_h8_avg_ssse3; +filter8_1dfunction aom_filter_block1d8_v8_avg_ssse3; +filter8_1dfunction aom_filter_block1d8_h8_avg_ssse3; +filter8_1dfunction aom_filter_block1d4_v8_avg_ssse3; +filter8_1dfunction aom_filter_block1d4_h8_avg_ssse3; + +filter8_1dfunction aom_filter_block1d16_v2_ssse3; +filter8_1dfunction aom_filter_block1d16_h2_ssse3; +filter8_1dfunction aom_filter_block1d8_v2_ssse3; +filter8_1dfunction aom_filter_block1d8_h2_ssse3; +filter8_1dfunction aom_filter_block1d4_v2_ssse3; +filter8_1dfunction aom_filter_block1d4_h2_ssse3; +filter8_1dfunction aom_filter_block1d16_v2_avg_ssse3; +filter8_1dfunction aom_filter_block1d16_h2_avg_ssse3; +filter8_1dfunction aom_filter_block1d8_v2_avg_ssse3; +filter8_1dfunction aom_filter_block1d8_h2_avg_ssse3; +filter8_1dfunction aom_filter_block1d4_v2_avg_ssse3; +filter8_1dfunction aom_filter_block1d4_h2_avg_ssse3; + +// void aom_convolve8_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, // int w, int h); -// void vpx_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, // int w, int h); -// void vpx_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_avg_horiz_ssse3(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, // int w, int h); -// void vpx_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_avg_vert_ssse3(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, @@ -875,7 +875,7 @@ static int get_filter_offset(const int16_t *f, const InterpKernel *base) { return (int)((const InterpKernel *)(intptr_t)f - base); } -void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, +void aom_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h) { @@ -889,12 +889,12 @@ void vpx_scaled_2d_ssse3(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, x_step_q4, filters_y, y0_q4, y_step_q4, w, h); } -// void vpx_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_ssse3(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, // int w, int h); -// void vpx_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride, +// void aom_convolve8_avg_ssse3(const uint8_t *src, ptrdiff_t src_stride, // uint8_t *dst, ptrdiff_t dst_stride, // const int16_t *filter_x, int x_step_q4, // const int16_t *filter_y, int y_step_q4, diff --git a/aom_dsp/x86/vpx_subpixel_8t_sse2.asm b/aom_dsp/x86/aom_subpixel_8t_sse2.asm similarity index 94% rename from aom_dsp/x86/vpx_subpixel_8t_sse2.asm rename to aom_dsp/x86/aom_subpixel_8t_sse2.asm index 
b197150c53a67e0d6295c3c41c5439ce6d2d2432..535581e46d9a3b053460f59294d1f0b13301a81e 100644 --- a/aom_dsp/x86/vpx_subpixel_8t_sse2.asm +++ b/aom_dsp/x86/aom_subpixel_8t_sse2.asm @@ -176,7 +176,7 @@ movq [rdi + %2], xmm0 %endm -;void vpx_filter_block1d4_v8_sse2 +;void aom_filter_block1d4_v8_sse2 ;( ; unsigned char *src_ptr, ; unsigned int src_pitch, @@ -185,8 +185,8 @@ ; unsigned int output_height, ; short *filter ;) -global sym(vpx_filter_block1d4_v8_sse2) PRIVATE -sym(vpx_filter_block1d4_v8_sse2): +global sym(aom_filter_block1d4_v8_sse2) PRIVATE +sym(aom_filter_block1d4_v8_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -243,7 +243,7 @@ sym(vpx_filter_block1d4_v8_sse2): pop rbp ret -;void vpx_filter_block1d8_v8_sse2 +;void aom_filter_block1d8_v8_sse2 ;( ; unsigned char *src_ptr, ; unsigned int src_pitch, @@ -252,8 +252,8 @@ sym(vpx_filter_block1d4_v8_sse2): ; unsigned int output_height, ; short *filter ;) -global sym(vpx_filter_block1d8_v8_sse2) PRIVATE -sym(vpx_filter_block1d8_v8_sse2): +global sym(aom_filter_block1d8_v8_sse2) PRIVATE +sym(aom_filter_block1d8_v8_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -302,7 +302,7 @@ sym(vpx_filter_block1d8_v8_sse2): pop rbp ret -;void vpx_filter_block1d16_v8_sse2 +;void aom_filter_block1d16_v8_sse2 ;( ; unsigned char *src_ptr, ; unsigned int src_pitch, @@ -311,8 +311,8 @@ sym(vpx_filter_block1d8_v8_sse2): ; unsigned int output_height, ; short *filter ;) -global sym(vpx_filter_block1d16_v8_sse2) PRIVATE -sym(vpx_filter_block1d16_v8_sse2): +global sym(aom_filter_block1d16_v8_sse2) PRIVATE +sym(aom_filter_block1d16_v8_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -365,8 +365,8 @@ sym(vpx_filter_block1d16_v8_sse2): pop rbp ret -global sym(vpx_filter_block1d4_v8_avg_sse2) PRIVATE -sym(vpx_filter_block1d4_v8_avg_sse2): +global sym(aom_filter_block1d4_v8_avg_sse2) PRIVATE +sym(aom_filter_block1d4_v8_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -423,8 +423,8 @@ sym(vpx_filter_block1d4_v8_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d8_v8_avg_sse2) PRIVATE -sym(vpx_filter_block1d8_v8_avg_sse2): +global sym(aom_filter_block1d8_v8_avg_sse2) PRIVATE +sym(aom_filter_block1d8_v8_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -472,8 +472,8 @@ sym(vpx_filter_block1d8_v8_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d16_v8_avg_sse2) PRIVATE -sym(vpx_filter_block1d16_v8_avg_sse2): +global sym(aom_filter_block1d16_v8_avg_sse2) PRIVATE +sym(aom_filter_block1d16_v8_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -525,7 +525,7 @@ sym(vpx_filter_block1d16_v8_avg_sse2): pop rbp ret -;void vpx_filter_block1d4_h8_sse2 +;void aom_filter_block1d4_h8_sse2 ;( ; unsigned char *src_ptr, ; unsigned int src_pixels_per_line, @@ -534,8 +534,8 @@ sym(vpx_filter_block1d16_v8_avg_sse2): ; unsigned int output_height, ; short *filter ;) -global sym(vpx_filter_block1d4_h8_sse2) PRIVATE -sym(vpx_filter_block1d4_h8_sse2): +global sym(aom_filter_block1d4_h8_sse2) PRIVATE +sym(aom_filter_block1d4_h8_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -599,7 +599,7 @@ sym(vpx_filter_block1d4_h8_sse2): pop rbp ret -;void vpx_filter_block1d8_h8_sse2 +;void aom_filter_block1d8_h8_sse2 ;( ; unsigned char *src_ptr, ; unsigned int src_pixels_per_line, @@ -608,8 +608,8 @@ sym(vpx_filter_block1d4_h8_sse2): ; unsigned int output_height, ; short *filter ;) -global sym(vpx_filter_block1d8_h8_sse2) PRIVATE -sym(vpx_filter_block1d8_h8_sse2): +global sym(aom_filter_block1d8_h8_sse2) PRIVATE +sym(aom_filter_block1d8_h8_sse2): push 
rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -674,7 +674,7 @@ sym(vpx_filter_block1d8_h8_sse2): pop rbp ret -;void vpx_filter_block1d16_h8_sse2 +;void aom_filter_block1d16_h8_sse2 ;( ; unsigned char *src_ptr, ; unsigned int src_pixels_per_line, @@ -683,8 +683,8 @@ sym(vpx_filter_block1d8_h8_sse2): ; unsigned int output_height, ; short *filter ;) -global sym(vpx_filter_block1d16_h8_sse2) PRIVATE -sym(vpx_filter_block1d16_h8_sse2): +global sym(aom_filter_block1d16_h8_sse2) PRIVATE +sym(aom_filter_block1d16_h8_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -769,8 +769,8 @@ sym(vpx_filter_block1d16_h8_sse2): pop rbp ret -global sym(vpx_filter_block1d4_h8_avg_sse2) PRIVATE -sym(vpx_filter_block1d4_h8_avg_sse2): +global sym(aom_filter_block1d4_h8_avg_sse2) PRIVATE +sym(aom_filter_block1d4_h8_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -834,8 +834,8 @@ sym(vpx_filter_block1d4_h8_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d8_h8_avg_sse2) PRIVATE -sym(vpx_filter_block1d8_h8_avg_sse2): +global sym(aom_filter_block1d8_h8_avg_sse2) PRIVATE +sym(aom_filter_block1d8_h8_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -900,8 +900,8 @@ sym(vpx_filter_block1d8_h8_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d16_h8_avg_sse2) PRIVATE -sym(vpx_filter_block1d16_h8_avg_sse2): +global sym(aom_filter_block1d16_h8_avg_sse2) PRIVATE +sym(aom_filter_block1d16_h8_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 diff --git a/aom_dsp/x86/vpx_subpixel_8t_ssse3.asm b/aom_dsp/x86/aom_subpixel_8t_ssse3.asm similarity index 100% rename from aom_dsp/x86/vpx_subpixel_8t_ssse3.asm rename to aom_dsp/x86/aom_subpixel_8t_ssse3.asm diff --git a/aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm similarity index 89% rename from aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm rename to aom_dsp/x86/aom_subpixel_bilinear_sse2.asm index 7de58ff2a9a67293bf8552153d06968082f488b1..78ac1c49f7e7b2a49c7fae089f4b03b6c0c1bdfe 100644 --- a/aom_dsp/x86/vpx_subpixel_bilinear_sse2.asm +++ b/aom_dsp/x86/aom_subpixel_bilinear_sse2.asm @@ -131,8 +131,8 @@ dec rcx %endm -global sym(vpx_filter_block1d4_v2_sse2) PRIVATE -sym(vpx_filter_block1d4_v2_sse2): +global sym(aom_filter_block1d4_v2_sse2) PRIVATE +sym(aom_filter_block1d4_v2_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -155,8 +155,8 @@ sym(vpx_filter_block1d4_v2_sse2): pop rbp ret -global sym(vpx_filter_block1d8_v2_sse2) PRIVATE -sym(vpx_filter_block1d8_v2_sse2): +global sym(aom_filter_block1d8_v2_sse2) PRIVATE +sym(aom_filter_block1d8_v2_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -181,8 +181,8 @@ sym(vpx_filter_block1d8_v2_sse2): pop rbp ret -global sym(vpx_filter_block1d16_v2_sse2) PRIVATE -sym(vpx_filter_block1d16_v2_sse2): +global sym(aom_filter_block1d16_v2_sse2) PRIVATE +sym(aom_filter_block1d16_v2_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -209,8 +209,8 @@ sym(vpx_filter_block1d16_v2_sse2): pop rbp ret -global sym(vpx_filter_block1d4_v2_avg_sse2) PRIVATE -sym(vpx_filter_block1d4_v2_avg_sse2): +global sym(aom_filter_block1d4_v2_avg_sse2) PRIVATE +sym(aom_filter_block1d4_v2_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -233,8 +233,8 @@ sym(vpx_filter_block1d4_v2_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d8_v2_avg_sse2) PRIVATE -sym(vpx_filter_block1d8_v2_avg_sse2): +global sym(aom_filter_block1d8_v2_avg_sse2) PRIVATE +sym(aom_filter_block1d8_v2_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -259,8 +259,8 @@ 
sym(vpx_filter_block1d8_v2_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d16_v2_avg_sse2) PRIVATE -sym(vpx_filter_block1d16_v2_avg_sse2): +global sym(aom_filter_block1d16_v2_avg_sse2) PRIVATE +sym(aom_filter_block1d16_v2_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -287,8 +287,8 @@ sym(vpx_filter_block1d16_v2_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d4_h2_sse2) PRIVATE -sym(vpx_filter_block1d4_h2_sse2): +global sym(aom_filter_block1d4_h2_sse2) PRIVATE +sym(aom_filter_block1d4_h2_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -312,8 +312,8 @@ sym(vpx_filter_block1d4_h2_sse2): pop rbp ret -global sym(vpx_filter_block1d8_h2_sse2) PRIVATE -sym(vpx_filter_block1d8_h2_sse2): +global sym(aom_filter_block1d8_h2_sse2) PRIVATE +sym(aom_filter_block1d8_h2_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -339,8 +339,8 @@ sym(vpx_filter_block1d8_h2_sse2): pop rbp ret -global sym(vpx_filter_block1d16_h2_sse2) PRIVATE -sym(vpx_filter_block1d16_h2_sse2): +global sym(aom_filter_block1d16_h2_sse2) PRIVATE +sym(aom_filter_block1d16_h2_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -367,8 +367,8 @@ sym(vpx_filter_block1d16_h2_sse2): pop rbp ret -global sym(vpx_filter_block1d4_h2_avg_sse2) PRIVATE -sym(vpx_filter_block1d4_h2_avg_sse2): +global sym(aom_filter_block1d4_h2_avg_sse2) PRIVATE +sym(aom_filter_block1d4_h2_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -392,8 +392,8 @@ sym(vpx_filter_block1d4_h2_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d8_h2_avg_sse2) PRIVATE -sym(vpx_filter_block1d8_h2_avg_sse2): +global sym(aom_filter_block1d8_h2_avg_sse2) PRIVATE +sym(aom_filter_block1d8_h2_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -419,8 +419,8 @@ sym(vpx_filter_block1d8_h2_avg_sse2): pop rbp ret -global sym(vpx_filter_block1d16_h2_avg_sse2) PRIVATE -sym(vpx_filter_block1d16_h2_avg_sse2): +global sym(aom_filter_block1d16_h2_avg_sse2) PRIVATE +sym(aom_filter_block1d16_h2_avg_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 diff --git a/aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm similarity index 88% rename from aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm rename to aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm index 46ad5435eed894dbc1a1658330cb6276582fcabc..043e535426ab0fdd021c5585a67d60182fee065e 100644 --- a/aom_dsp/x86/vpx_subpixel_bilinear_ssse3.asm +++ b/aom_dsp/x86/aom_subpixel_bilinear_ssse3.asm @@ -109,8 +109,8 @@ dec rcx %endm -global sym(vpx_filter_block1d4_v2_ssse3) PRIVATE -sym(vpx_filter_block1d4_v2_ssse3): +global sym(aom_filter_block1d4_v2_ssse3) PRIVATE +sym(aom_filter_block1d4_v2_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -133,8 +133,8 @@ sym(vpx_filter_block1d4_v2_ssse3): pop rbp ret -global sym(vpx_filter_block1d8_v2_ssse3) PRIVATE -sym(vpx_filter_block1d8_v2_ssse3): +global sym(aom_filter_block1d8_v2_ssse3) PRIVATE +sym(aom_filter_block1d8_v2_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -159,8 +159,8 @@ sym(vpx_filter_block1d8_v2_ssse3): pop rbp ret -global sym(vpx_filter_block1d16_v2_ssse3) PRIVATE -sym(vpx_filter_block1d16_v2_ssse3): +global sym(aom_filter_block1d16_v2_ssse3) PRIVATE +sym(aom_filter_block1d16_v2_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -186,8 +186,8 @@ sym(vpx_filter_block1d16_v2_ssse3): pop rbp ret -global sym(vpx_filter_block1d4_v2_avg_ssse3) PRIVATE -sym(vpx_filter_block1d4_v2_avg_ssse3): +global sym(aom_filter_block1d4_v2_avg_ssse3) PRIVATE +sym(aom_filter_block1d4_v2_avg_ssse3): push rbp mov rbp, 
rsp SHADOW_ARGS_TO_STACK 6 @@ -210,8 +210,8 @@ sym(vpx_filter_block1d4_v2_avg_ssse3): pop rbp ret -global sym(vpx_filter_block1d8_v2_avg_ssse3) PRIVATE -sym(vpx_filter_block1d8_v2_avg_ssse3): +global sym(aom_filter_block1d8_v2_avg_ssse3) PRIVATE +sym(aom_filter_block1d8_v2_avg_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -236,8 +236,8 @@ sym(vpx_filter_block1d8_v2_avg_ssse3): pop rbp ret -global sym(vpx_filter_block1d16_v2_avg_ssse3) PRIVATE -sym(vpx_filter_block1d16_v2_avg_ssse3): +global sym(aom_filter_block1d16_v2_avg_ssse3) PRIVATE +sym(aom_filter_block1d16_v2_avg_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -263,8 +263,8 @@ sym(vpx_filter_block1d16_v2_avg_ssse3): pop rbp ret -global sym(vpx_filter_block1d4_h2_ssse3) PRIVATE -sym(vpx_filter_block1d4_h2_ssse3): +global sym(aom_filter_block1d4_h2_ssse3) PRIVATE +sym(aom_filter_block1d4_h2_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -288,8 +288,8 @@ sym(vpx_filter_block1d4_h2_ssse3): pop rbp ret -global sym(vpx_filter_block1d8_h2_ssse3) PRIVATE -sym(vpx_filter_block1d8_h2_ssse3): +global sym(aom_filter_block1d8_h2_ssse3) PRIVATE +sym(aom_filter_block1d8_h2_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -315,8 +315,8 @@ sym(vpx_filter_block1d8_h2_ssse3): pop rbp ret -global sym(vpx_filter_block1d16_h2_ssse3) PRIVATE -sym(vpx_filter_block1d16_h2_ssse3): +global sym(aom_filter_block1d16_h2_ssse3) PRIVATE +sym(aom_filter_block1d16_h2_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -342,8 +342,8 @@ sym(vpx_filter_block1d16_h2_ssse3): pop rbp ret -global sym(vpx_filter_block1d4_h2_avg_ssse3) PRIVATE -sym(vpx_filter_block1d4_h2_avg_ssse3): +global sym(aom_filter_block1d4_h2_avg_ssse3) PRIVATE +sym(aom_filter_block1d4_h2_avg_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -367,8 +367,8 @@ sym(vpx_filter_block1d4_h2_avg_ssse3): pop rbp ret -global sym(vpx_filter_block1d8_h2_avg_ssse3) PRIVATE -sym(vpx_filter_block1d8_h2_avg_ssse3): +global sym(aom_filter_block1d8_h2_avg_ssse3) PRIVATE +sym(aom_filter_block1d8_h2_avg_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -394,8 +394,8 @@ sym(vpx_filter_block1d8_h2_avg_ssse3): pop rbp ret -global sym(vpx_filter_block1d16_h2_avg_ssse3) PRIVATE -sym(vpx_filter_block1d16_h2_avg_ssse3): +global sym(aom_filter_block1d16_h2_avg_ssse3) PRIVATE +sym(aom_filter_block1d16_h2_avg_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 diff --git a/aom_dsp/x86/avg_intrin_sse2.c b/aom_dsp/x86/avg_intrin_sse2.c index b5631bfcaf51d954e85d423020a2f33750288acd..94a579c9b1d38d3f5e6a989765dafb72f59ae4ca 100644 --- a/aom_dsp/x86/avg_intrin_sse2.c +++ b/aom_dsp/x86/avg_intrin_sse2.c @@ -11,10 +11,10 @@ #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" -void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp, +void aom_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp, int *min, int *max) { __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff; u0 = _mm_setzero_si128(); @@ -92,7 +92,7 @@ void vpx_minmax_8x8_sse2(const uint8_t *s, int p, const uint8_t *d, int dp, *min = _mm_extract_epi16(minabsdiff, 0); } -unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) { +unsigned int aom_avg_8x8_sse2(const uint8_t *s, int p) { __m128i s0, s1, u0; unsigned int avg = 0; u0 = _mm_setzero_si128(); @@ -119,7 +119,7 @@ unsigned int vpx_avg_8x8_sse2(const uint8_t *s, int p) { return (avg + 32) >> 6; } -unsigned int vpx_avg_4x4_sse2(const uint8_t *s, int p) { +unsigned int 
aom_avg_4x4_sse2(const uint8_t *s, int p) { __m128i s0, s1, u0; unsigned int avg = 0; u0 = _mm_setzero_si128(); @@ -213,7 +213,7 @@ static void hadamard_col8_sse2(__m128i *in, int iter) { } } -void vpx_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride, +void aom_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride, int16_t *coeff) { __m128i src[8]; src[0] = _mm_load_si128((const __m128i *)src_diff); @@ -245,13 +245,13 @@ void vpx_hadamard_8x8_sse2(int16_t const *src_diff, int src_stride, _mm_store_si128((__m128i *)coeff, src[7]); } -void vpx_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride, +void aom_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride, int16_t *coeff) { int idx; for (idx = 0; idx < 4; ++idx) { int16_t const *src_ptr = src_diff + (idx >> 1) * 8 * src_stride + (idx & 0x01) * 8; - vpx_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64); + aom_hadamard_8x8_sse2(src_ptr, src_stride, coeff + idx * 64); } for (idx = 0; idx < 64; idx += 8) { @@ -284,7 +284,7 @@ void vpx_hadamard_16x16_sse2(int16_t const *src_diff, int src_stride, } } -int vpx_satd_sse2(const int16_t *coeff, int length) { +int aom_satd_sse2(const int16_t *coeff, int length) { int i; const __m128i zero = _mm_setzero_si128(); __m128i accum = zero; @@ -310,7 +310,7 @@ int vpx_satd_sse2(const int16_t *coeff, int length) { return _mm_cvtsi128_si32(accum); } -void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref, +void aom_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref, const int ref_stride, const int height) { int idx; __m128i zero = _mm_setzero_si128(); @@ -359,7 +359,7 @@ void vpx_int_pro_row_sse2(int16_t *hbuf, uint8_t const *ref, _mm_storeu_si128((__m128i *)hbuf, s1); } -int16_t vpx_int_pro_col_sse2(uint8_t const *ref, const int width) { +int16_t aom_int_pro_col_sse2(uint8_t const *ref, const int width) { __m128i zero = _mm_setzero_si128(); __m128i src_line = _mm_load_si128((const __m128i *)ref); __m128i s0 = _mm_sad_epu8(src_line, zero); @@ -379,7 +379,7 @@ int16_t vpx_int_pro_col_sse2(uint8_t const *ref, const int width) { return _mm_extract_epi16(s0, 0); } -int vpx_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) { +int aom_vector_var_sse2(int16_t const *ref, int16_t const *src, const int bwl) { int idx; int width = 4 << bwl; int16_t mean; diff --git a/aom_dsp/x86/avg_ssse3_x86_64.asm b/aom_dsp/x86/avg_ssse3_x86_64.asm index 26412e8e432bb5b1e7fd0c87f7f7ab822ac02065..8f28874e4f1e94e324bc0b94e1640f433ecfffe7 100644 --- a/aom_dsp/x86/avg_ssse3_x86_64.asm +++ b/aom_dsp/x86/avg_ssse3_x86_64.asm @@ -8,7 +8,7 @@ ; be found in the AUTHORS file in the root of the source tree. 
; -%define private_prefix vpx +%define private_prefix aom %include "third_party/x86inc/x86inc.asm" diff --git a/aom_dsp/x86/convolve.h b/aom_dsp/x86/convolve.h index b50359f90e0f5ae536c5ff7951e3148fdd0a7260..cf436543f25f58eb680cc3c644292d958c07bca3 100644 --- a/aom_dsp/x86/convolve.h +++ b/aom_dsp/x86/convolve.h @@ -13,8 +13,8 @@ #include -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #include "aom_ports/mem.h" typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, @@ -22,7 +22,7 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, uint32_t output_height, const int16_t *filter); #define FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \ - void vpx_convolve8_##name##_##opt( \ + void aom_convolve8_##name##_##opt( \ const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ const int16_t *filter_y, int y_step_q4, int w, int h) { \ @@ -30,21 +30,21 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, assert(step_q4 == 16); \ if (filter[0] || filter[1] || filter[2]) { \ while (w >= 16) { \ - vpx_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \ + aom_filter_block1d16_##dir##8_##avg##opt(src_start, src_stride, dst, \ dst_stride, h, filter); \ src += 16; \ dst += 16; \ w -= 16; \ } \ while (w >= 8) { \ - vpx_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst, \ + aom_filter_block1d8_##dir##8_##avg##opt(src_start, src_stride, dst, \ dst_stride, h, filter); \ src += 8; \ dst += 8; \ w -= 8; \ } \ while (w >= 4) { \ - vpx_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst, \ + aom_filter_block1d4_##dir##8_##avg##opt(src_start, src_stride, dst, \ dst_stride, h, filter); \ src += 4; \ dst += 4; \ @@ -52,21 +52,21 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, } \ } else { \ while (w >= 16) { \ - vpx_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst, \ + aom_filter_block1d16_##dir##2_##avg##opt(src, src_stride, dst, \ dst_stride, h, filter); \ src += 16; \ dst += 16; \ w -= 16; \ } \ while (w >= 8) { \ - vpx_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst, \ + aom_filter_block1d8_##dir##2_##avg##opt(src, src_stride, dst, \ dst_stride, h, filter); \ src += 8; \ dst += 8; \ w -= 8; \ } \ while (w >= 4) { \ - vpx_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst, \ + aom_filter_block1d4_##dir##2_##avg##opt(src, src_stride, dst, \ dst_stride, h, filter); \ src += 4; \ dst += 4; \ @@ -76,7 +76,7 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, } #define FUN_CONV_2D(avg, opt) \ - void vpx_convolve8_##avg##opt( \ + void aom_convolve8_##avg##opt( \ const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ const int16_t *filter_y, int y_step_q4, int w, int h) { \ @@ -89,17 +89,17 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch, if (filter_x[0] || filter_x[1] || filter_x[2] || filter_y[0] || \ filter_y[1] || filter_y[2]) { \ DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 71]); \ - vpx_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \ + aom_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \ filter_x, x_step_q4, filter_y, y_step_q4, w, \ h + 7); \ - vpx_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \ + 
aom_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \ filter_x, x_step_q4, filter_y, \ y_step_q4, w, h); \ } else { \ DECLARE_ALIGNED(16, uint8_t, fdata2[64 * 65]); \ - vpx_convolve8_horiz_##opt(src, src_stride, fdata2, 64, filter_x, \ + aom_convolve8_horiz_##opt(src, src_stride, fdata2, 64, filter_x, \ x_step_q4, filter_y, y_step_q4, w, h + 1); \ - vpx_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, filter_x, \ + aom_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, filter_x, \ x_step_q4, filter_y, y_step_q4, w, h); \ } \ } @@ -114,7 +114,7 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr, const int16_t *filter, int bd); #define HIGH_FUN_CONV_1D(name, step_q4, filter, dir, src_start, avg, opt) \ - void vpx_highbd_convolve8_##name##_##opt( \ + void aom_highbd_convolve8_##name##_##opt( \ const uint8_t *src8, ptrdiff_t src_stride, uint8_t *dst8, \ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ const int16_t *filter_y, int y_step_q4, int w, int h, int bd) { \ @@ -123,21 +123,21 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr, uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ if (filter[0] || filter[1] || filter[2]) { \ while (w >= 16) { \ - vpx_highbd_filter_block1d16_##dir##8_##avg##opt( \ + aom_highbd_filter_block1d16_##dir##8_##avg##opt( \ src_start, src_stride, dst, dst_stride, h, filter, bd); \ src += 16; \ dst += 16; \ w -= 16; \ } \ while (w >= 8) { \ - vpx_highbd_filter_block1d8_##dir##8_##avg##opt( \ + aom_highbd_filter_block1d8_##dir##8_##avg##opt( \ src_start, src_stride, dst, dst_stride, h, filter, bd); \ src += 8; \ dst += 8; \ w -= 8; \ } \ while (w >= 4) { \ - vpx_highbd_filter_block1d4_##dir##8_##avg##opt( \ + aom_highbd_filter_block1d4_##dir##8_##avg##opt( \ src_start, src_stride, dst, dst_stride, h, filter, bd); \ src += 4; \ dst += 4; \ @@ -145,21 +145,21 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr, } \ } else { \ while (w >= 16) { \ - vpx_highbd_filter_block1d16_##dir##2_##avg##opt( \ + aom_highbd_filter_block1d16_##dir##2_##avg##opt( \ src, src_stride, dst, dst_stride, h, filter, bd); \ src += 16; \ dst += 16; \ w -= 16; \ } \ while (w >= 8) { \ - vpx_highbd_filter_block1d8_##dir##2_##avg##opt( \ + aom_highbd_filter_block1d8_##dir##2_##avg##opt( \ src, src_stride, dst, dst_stride, h, filter, bd); \ src += 8; \ dst += 8; \ w -= 8; \ } \ while (w >= 4) { \ - vpx_highbd_filter_block1d4_##dir##2_##avg##opt( \ + aom_highbd_filter_block1d4_##dir##2_##avg##opt( \ src, src_stride, dst, dst_stride, h, filter, bd); \ src += 4; \ dst += 4; \ @@ -168,14 +168,14 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr, } \ } \ if (w) { \ - vpx_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \ + aom_highbd_convolve8_##name##_c(src8, src_stride, dst8, dst_stride, \ filter_x, x_step_q4, filter_y, \ y_step_q4, w, h, bd); \ } \ } #define HIGH_FUN_CONV_2D(avg, opt) \ - void vpx_highbd_convolve8_##avg##opt( \ + void aom_highbd_convolve8_##avg##opt( \ const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \ ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, \ const int16_t *filter_y, int y_step_q4, int w, int h, int bd) { \ @@ -185,23 +185,23 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr, if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \ filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \ DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]); \ - vpx_highbd_convolve8_horiz_##opt( \ + 
aom_highbd_convolve8_horiz_##opt( \ src - 3 * src_stride, src_stride, CONVERT_TO_BYTEPTR(fdata2), 64, \ filter_x, x_step_q4, filter_y, y_step_q4, w, h + 7, bd); \ - vpx_highbd_convolve8_##avg##vert_##opt( \ + aom_highbd_convolve8_##avg##vert_##opt( \ CONVERT_TO_BYTEPTR(fdata2) + 192, 64, dst, dst_stride, filter_x, \ x_step_q4, filter_y, y_step_q4, w, h, bd); \ } else { \ DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]); \ - vpx_highbd_convolve8_horiz_##opt( \ + aom_highbd_convolve8_horiz_##opt( \ src, src_stride, CONVERT_TO_BYTEPTR(fdata2), 64, filter_x, \ x_step_q4, filter_y, y_step_q4, w, h + 1, bd); \ - vpx_highbd_convolve8_##avg##vert_##opt( \ + aom_highbd_convolve8_##avg##vert_##opt( \ CONVERT_TO_BYTEPTR(fdata2), 64, dst, dst_stride, filter_x, \ x_step_q4, filter_y, y_step_q4, w, h, bd); \ } \ } else { \ - vpx_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \ + aom_highbd_convolve8_##avg##c(src, src_stride, dst, dst_stride, \ filter_x, x_step_q4, filter_y, y_step_q4, \ w, h, bd); \ } \ diff --git a/aom_dsp/x86/fwd_dct32x32_impl_sse2.h b/aom_dsp/x86/fwd_dct32x32_impl_sse2.h index 337a700042e27abba2748fa2583a6bf0328dcfd1..e63e3759f82d73c97f78dd3edabe6a8a2a2d5714 100644 --- a/aom_dsp/x86/fwd_dct32x32_impl_sse2.h +++ b/aom_dsp/x86/fwd_dct32x32_impl_sse2.h @@ -22,31 +22,31 @@ #define ADD_EPI16 _mm_adds_epi16 #define SUB_EPI16 _mm_subs_epi16 #if FDCT32x32_HIGH_PRECISION -void vpx_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) { +void aom_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) { int i, j; for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i]; - vpx_fdct32(temp_in, temp_out, 0); + aom_fdct32(temp_in, temp_out, 0); for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2); } } -#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_c -#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rows_c +#define HIGH_FDCT32x32_2D_C aom_highbd_fdct32x32_c +#define HIGH_FDCT32x32_2D_ROWS_C aom_fdct32x32_rows_c #else -void vpx_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) { +void aom_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) { int i, j; for (i = 0; i < 32; ++i) { tran_high_t temp_in[32], temp_out[32]; for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i]; - vpx_fdct32(temp_in, temp_out, 1); + aom_fdct32(temp_in, temp_out, 1); for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j]; } } -#define HIGH_FDCT32x32_2D_C vpx_highbd_fdct32x32_rd_c -#define HIGH_FDCT32x32_2D_ROWS_C vpx_fdct32x32_rd_rows_c +#define HIGH_FDCT32x32_2D_C aom_highbd_fdct32x32_rd_c +#define HIGH_FDCT32x32_2D_ROWS_C aom_fdct32x32_rd_rows_c #endif // FDCT32x32_HIGH_PRECISION #else #define ADD_EPI16 _mm_add_epi16 diff --git a/aom_dsp/x86/fwd_txfm_avx2.c b/aom_dsp/x86/fwd_txfm_avx2.c index c94ad4e250b41c984e4c35ca97f3125e2fd723cd..670f864d0764eef883c7abb36a2c8de045474520 100644 --- a/aom_dsp/x86/fwd_txfm_avx2.c +++ b/aom_dsp/x86/fwd_txfm_avx2.c @@ -9,15 +9,15 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_config.h" +#include "./aom_config.h" -#define FDCT32x32_2D_AVX2 vpx_fdct32x32_rd_avx2 +#define FDCT32x32_2D_AVX2 aom_fdct32x32_rd_avx2 #define FDCT32x32_HIGH_PRECISION 0 #include "aom_dsp/x86/fwd_dct32x32_impl_avx2.h" #undef FDCT32x32_2D_AVX2 #undef FDCT32x32_HIGH_PRECISION -#define FDCT32x32_2D_AVX2 vpx_fdct32x32_avx2 +#define FDCT32x32_2D_AVX2 aom_fdct32x32_avx2 #define FDCT32x32_HIGH_PRECISION 1 #include "aom_dsp/x86/fwd_dct32x32_impl_avx2.h" // NOLINT #undef FDCT32x32_2D_AVX2 diff --git a/aom_dsp/x86/fwd_txfm_impl_sse2.h b/aom_dsp/x86/fwd_txfm_impl_sse2.h index 2028240eaf09c2c6e23c22ee0e7de5218d4febcd..7bb1db70af17c21b32a8bc15e27387243038e357 100644 --- a/aom_dsp/x86/fwd_txfm_impl_sse2.h +++ b/aom_dsp/x86/fwd_txfm_impl_sse2.h @@ -11,7 +11,7 @@ #include // SSE2 -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/txfm_common.h" #include "aom_dsp/x86/fwd_txfm_sse2.h" #include "aom_dsp/x86/txfm_common_sse2.h" @@ -99,7 +99,7 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { _mm_cmplt_epi16(in1, _mm_set1_epi16(0xfc00))); test = _mm_movemask_epi8(_mm_or_si128(cmp0, cmp1)); if (test) { - vpx_highbd_fdct4x4_c(input, output, stride); + aom_highbd_fdct4x4_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -170,7 +170,7 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&x0, &x1); if (overflow) { - vpx_highbd_fdct4x4_c(input, output, stride); + aom_highbd_fdct4x4_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -192,7 +192,7 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&t0, &t1); if (overflow) { - vpx_highbd_fdct4x4_c(input, output, stride); + aom_highbd_fdct4x4_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -231,7 +231,7 @@ void FDCT4x4_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&x0, &x1); if (overflow) { - vpx_highbd_fdct4x4_c(input, output, stride); + aom_highbd_fdct4x4_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -314,7 +314,7 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7); if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); + aom_highbd_fdct8x8_c(input, output, stride); return; } } @@ -329,7 +329,7 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3); if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); + aom_highbd_fdct8x8_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -372,7 +372,7 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&res0, &res4, &res2, &res6); if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); + aom_highbd_fdct8x8_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -402,7 +402,7 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&r0, &r1); if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); + aom_highbd_fdct8x8_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -415,7 +415,7 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t 
*output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3); if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); + aom_highbd_fdct8x8_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -458,7 +458,7 @@ void FDCT8x8_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&res1, &res7, &res5, &res3); if (overflow) { - vpx_highbd_fdct8x8_c(input, output, stride); + aom_highbd_fdct8x8_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -721,7 +721,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { overflow = check_epi16_overflow_x8(&input0, &input1, &input2, &input3, &input4, &input5, &input6, &input7); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -741,7 +741,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3, &step1_4, &step1_5, &step1_6, &step1_7); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -761,7 +761,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { overflow = check_epi16_overflow_x8(&q0, &q1, &q2, &q3, &q4, &q5, &q6, &q7); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -775,7 +775,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&r0, &r1, &r2, &r3); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -797,7 +797,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&res00, &res08, &res04, &res12); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -818,7 +818,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x2(&r0, &r1); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -831,7 +831,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&x0, &x1, &x2, &x3); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -854,7 +854,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { overflow = check_epi16_overflow_x4(&res02, &res14, &res10, &res06); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -882,7 +882,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { overflow = check_epi16_overflow_x4(&step2_2, &step2_3, &step2_5, &step2_4); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH 
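The fwd_txfm_impl_sse2.h hunks above and below all rename the same fallback call. When DCT_HIGH_BIT_DEPTH is enabled, each SSE2 stage of the forward transform runs check_epi16_overflow_x*() on its int16_t intermediates and, on overflow, abandons the SIMD path and recomputes the whole block with the exact C routine, now aom_highbd_fdct*_c instead of vpx_highbd_fdct*_c. The bail-out keeps the SIMD kernels bit-exact with the C reference at high bit depths without widening every intermediate to 32 bits. A minimal self-contained sketch of that guard-and-fallback shape, using invented names (butterfly_fast, butterfly_exact) that are not libaom symbols:

    #include <stdint.h>
    #include <stdio.h>

    /* Exact wide-precision path; stands in for the aom_highbd_fdct*_c fallback. */
    static void butterfly_exact(const int32_t *in, int32_t *out, int n) {
      for (int i = 0; i < n; ++i) {
        out[2 * i] = in[2 * i] + in[2 * i + 1];
        out[2 * i + 1] = in[2 * i] - in[2 * i + 1];
      }
    }

    /* Fast narrow path with the same check-and-bail shape as the SSE2 FDCTs:
     * if any intermediate no longer fits in int16_t, redo everything exactly. */
    static void butterfly_fast(const int32_t *in, int32_t *out, int n) {
      for (int i = 0; i < n; ++i) {
        const int32_t sum = in[2 * i] + in[2 * i + 1];
        const int32_t diff = in[2 * i] - in[2 * i + 1];
        if (sum < INT16_MIN || sum > INT16_MAX ||
            diff < INT16_MIN || diff > INT16_MAX) {
          /* analogous to check_epi16_overflow_x*() detecting an overflow */
          butterfly_exact(in, out, n);  /* fall back and return, as the FDCTs do */
          return;
        }
        out[2 * i] = (int16_t)sum;
        out[2 * i + 1] = (int16_t)diff;
      }
    }

    int main(void) {
      /* 30000 + 10000 does not fit in int16_t, so the exact path is taken. */
      const int32_t in[4] = { 30000, 10000, -5, 7 };
      int32_t out[4];
      butterfly_fast(in, out, 2);
      printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
      return 0;
    }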
@@ -902,7 +902,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { check_epi16_overflow_x8(&step3_0, &step3_1, &step3_2, &step3_3, &step3_4, &step3_5, &step3_6, &step3_7); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -925,7 +925,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { overflow = check_epi16_overflow_x4(&step2_1, &step2_2, &step2_6, &step2_5); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -945,7 +945,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { check_epi16_overflow_x8(&step1_0, &step1_1, &step1_2, &step1_3, &step1_4, &step1_5, &step1_6, &step1_7); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -967,7 +967,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&res01, &res09, &res15, &res07); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH @@ -988,7 +988,7 @@ void FDCT16x16_2D(const int16_t *input, tran_low_t *output, int stride) { #if DCT_HIGH_BIT_DEPTH overflow = check_epi16_overflow_x4(&res05, &res13, &res11, &res03); if (overflow) { - vpx_highbd_fdct16x16_c(input, output, stride); + aom_highbd_fdct16x16_c(input, output, stride); return; } #endif // DCT_HIGH_BIT_DEPTH diff --git a/aom_dsp/x86/fwd_txfm_sse2.c b/aom_dsp/x86/fwd_txfm_sse2.c index ab159b407ed3197833f3730bec797e88bd98dc35..2afb212e946d0def5fc7d2425e7f34d8d3db26a9 100644 --- a/aom_dsp/x86/fwd_txfm_sse2.c +++ b/aom_dsp/x86/fwd_txfm_sse2.c @@ -11,11 +11,11 @@ #include // SSE2 -#include "./vpx_config.h" -#include "aom_dsp/vpx_dsp_common.h" +#include "./aom_config.h" +#include "aom_dsp/aom_dsp_common.h" #include "aom_dsp/x86/fwd_txfm_sse2.h" -void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) { __m128i in0, in1; __m128i tmp; const __m128i zero = _mm_setzero_si128(); @@ -44,7 +44,7 @@ void vpx_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) { store_output(&in0, output); } -void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { +void aom_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride)); __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride)); __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride)); @@ -84,7 +84,7 @@ void vpx_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) { store_output(&in1, output); } -void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, +void aom_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, int stride) { __m128i in0, in1, in2, in3; __m128i u0, u1; @@ -153,7 +153,7 @@ void vpx_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output, store_output(&in1, output); } -void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, +void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, int stride) { __m128i in0, in1, in2, in3; __m128i u0, u1; @@ -226,21 +226,21 @@ void 
vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, } #define DCT_HIGH_BIT_DEPTH 0 -#define FDCT4x4_2D vpx_fdct4x4_sse2 -#define FDCT8x8_2D vpx_fdct8x8_sse2 -#define FDCT16x16_2D vpx_fdct16x16_sse2 +#define FDCT4x4_2D aom_fdct4x4_sse2 +#define FDCT8x8_2D aom_fdct8x8_sse2 +#define FDCT16x16_2D aom_fdct16x16_sse2 #include "aom_dsp/x86/fwd_txfm_impl_sse2.h" #undef FDCT4x4_2D #undef FDCT8x8_2D #undef FDCT16x16_2D -#define FDCT32x32_2D vpx_fdct32x32_rd_sse2 +#define FDCT32x32_2D aom_fdct32x32_rd_sse2 #define FDCT32x32_HIGH_PRECISION 0 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" #undef FDCT32x32_2D #undef FDCT32x32_HIGH_PRECISION -#define FDCT32x32_2D vpx_fdct32x32_sse2 +#define FDCT32x32_2D aom_fdct32x32_sse2 #define FDCT32x32_HIGH_PRECISION 1 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT #undef FDCT32x32_2D @@ -249,21 +249,21 @@ void vpx_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output, #if CONFIG_VPX_HIGHBITDEPTH #define DCT_HIGH_BIT_DEPTH 1 -#define FDCT4x4_2D vpx_highbd_fdct4x4_sse2 -#define FDCT8x8_2D vpx_highbd_fdct8x8_sse2 -#define FDCT16x16_2D vpx_highbd_fdct16x16_sse2 +#define FDCT4x4_2D aom_highbd_fdct4x4_sse2 +#define FDCT8x8_2D aom_highbd_fdct8x8_sse2 +#define FDCT16x16_2D aom_highbd_fdct16x16_sse2 #include "aom_dsp/x86/fwd_txfm_impl_sse2.h" // NOLINT #undef FDCT4x4_2D #undef FDCT8x8_2D #undef FDCT16x16_2D -#define FDCT32x32_2D vpx_highbd_fdct32x32_rd_sse2 +#define FDCT32x32_2D aom_highbd_fdct32x32_rd_sse2 #define FDCT32x32_HIGH_PRECISION 0 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT #undef FDCT32x32_2D #undef FDCT32x32_HIGH_PRECISION -#define FDCT32x32_2D vpx_highbd_fdct32x32_sse2 +#define FDCT32x32_2D aom_highbd_fdct32x32_sse2 #define FDCT32x32_HIGH_PRECISION 1 #include "aom_dsp/x86/fwd_dct32x32_impl_sse2.h" // NOLINT #undef FDCT32x32_2D diff --git a/aom_dsp/x86/halfpix_variance_impl_sse2.asm b/aom_dsp/x86/halfpix_variance_impl_sse2.asm index b91d1dc0da5c0668f1d9d53ee64d12d689b1bcee..66e752e03a9f0a13ac1b4ad857ab72e79a146a8a 100644 --- a/aom_dsp/x86/halfpix_variance_impl_sse2.asm +++ b/aom_dsp/x86/halfpix_variance_impl_sse2.asm @@ -10,15 +10,15 @@ %include "aom_ports/x86_abi_support.asm" -;void vpx_half_horiz_vert_variance16x_h_sse2(unsigned char *ref, +;void aom_half_horiz_vert_variance16x_h_sse2(unsigned char *ref, ; int ref_stride, ; unsigned char *src, ; int src_stride, ; unsigned int height, ; int *sum, ; unsigned int *sumsquared) -global sym(vpx_half_horiz_vert_variance16x_h_sse2) PRIVATE -sym(vpx_half_horiz_vert_variance16x_h_sse2): +global sym(aom_half_horiz_vert_variance16x_h_sse2) PRIVATE +sym(aom_half_horiz_vert_variance16x_h_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 7 @@ -45,7 +45,7 @@ sym(vpx_half_horiz_vert_variance16x_h_sse2): lea rsi, [rsi + rax] -vpx_half_horiz_vert_variance16x_h_1: +aom_half_horiz_vert_variance16x_h_1: movdqu xmm1, XMMWORD PTR [rsi] ; movdqu xmm2, XMMWORD PTR [rsi+1] ; pavgb xmm1, xmm2 ; xmm1 = avg(xmm1,xmm3) horizontal line i+1 @@ -77,7 +77,7 @@ vpx_half_horiz_vert_variance16x_h_1: lea rdi, [rdi + rdx] sub rcx, 1 ; - jnz vpx_half_horiz_vert_variance16x_h_1 ; + jnz aom_half_horiz_vert_variance16x_h_1 ; pxor xmm1, xmm1 pxor xmm5, xmm5 @@ -123,15 +123,15 @@ vpx_half_horiz_vert_variance16x_h_1: ret -;void vpx_half_vert_variance16x_h_sse2(unsigned char *ref, +;void aom_half_vert_variance16x_h_sse2(unsigned char *ref, ; int ref_stride, ; unsigned char *src, ; int src_stride, ; unsigned int height, ; int *sum, ; unsigned int *sumsquared) -global sym(vpx_half_vert_variance16x_h_sse2) PRIVATE 
-sym(vpx_half_vert_variance16x_h_sse2):
+global sym(aom_half_vert_variance16x_h_sse2) PRIVATE
+sym(aom_half_vert_variance16x_h_sse2):
   push rbp
   mov rbp, rsp
   SHADOW_ARGS_TO_STACK 7
@@ -154,7 +154,7 @@ sym(vpx_half_vert_variance16x_h_sse2):
   lea rsi, [rsi + rax ]
   pxor xmm0, xmm0
-vpx_half_vert_variance16x_h_1:
+aom_half_vert_variance16x_h_1:
   movdqu xmm3, XMMWORD PTR [rsi]
   pavgb xmm5, xmm3 ; xmm5 = avg(xmm1,xmm3)
@@ -182,7 +182,7 @@ vpx_half_vert_variance16x_h_1:
   lea rdi, [rdi + rdx]
   sub rcx, 1
-  jnz vpx_half_vert_variance16x_h_1
+  jnz aom_half_vert_variance16x_h_1
   pxor xmm1, xmm1
   pxor xmm5, xmm5
@@ -228,15 +228,15 @@ vpx_half_vert_variance16x_h_1:
   ret
-;void vpx_half_horiz_variance16x_h_sse2(unsigned char *ref,
+;void aom_half_horiz_variance16x_h_sse2(unsigned char *ref,
;   int ref_stride
;   unsigned char *src,
;   int src_stride,
;   unsigned int height,
;   int *sum,
;   unsigned int *sumsquared)
-global sym(vpx_half_horiz_variance16x_h_sse2) PRIVATE
-sym(vpx_half_horiz_variance16x_h_sse2):
+global sym(aom_half_horiz_variance16x_h_sse2) PRIVATE
+sym(aom_half_horiz_variance16x_h_sse2):
   push rbp
   mov rbp, rsp
   SHADOW_ARGS_TO_STACK 7
@@ -257,7 +257,7 @@ sym(vpx_half_horiz_variance16x_h_sse2):
   pxor xmm0, xmm0 ;
-vpx_half_horiz_variance16x_h_1:
+aom_half_horiz_variance16x_h_1:
   movdqu xmm5, XMMWORD PTR [rsi] ; xmm5 = s0,s1,s2..s15
   movdqu xmm3, XMMWORD PTR [rsi+1] ; xmm3 = s1,s2,s3..s16
@@ -284,7 +284,7 @@ vpx_half_horiz_variance16x_h_1:
   lea rdi, [rdi + rdx]
   sub rcx, 1 ;
-  jnz vpx_half_horiz_variance16x_h_1 ;
+  jnz aom_half_horiz_variance16x_h_1 ;
   pxor xmm1, xmm1
   pxor xmm5, xmm5
@@ -335,7 +335,7 @@ align 16
xmm_bi_rd:
   times 8 dw 64
align 16
-vpx_bilinear_filters_sse2:
+aom_bilinear_filters_sse2:
   dw 128, 128, 128, 128, 128, 128, 128, 128, 0, 0, 0, 0, 0, 0, 0, 0
   dw 112, 112, 112, 112, 112, 112, 112, 112, 16, 16, 16, 16, 16, 16, 16, 16
   dw 96, 96, 96, 96, 96, 96, 96, 96, 32, 32, 32, 32, 32, 32, 32, 32
diff --git a/aom_dsp/x86/halfpix_variance_sse2.c b/aom_dsp/x86/halfpix_variance_sse2.c
index 15cce503c656dcf82a393571b10b59814166fb65..31152b971e79b6fc0f8ca99ac4fd8ae6f18dd2f3 100644
--- a/aom_dsp/x86/halfpix_variance_sse2.c
+++ b/aom_dsp/x86/halfpix_variance_sse2.c
@@ -9,59 +9,59 @@
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */
-#include "./vpx_config.h"
-#include "./vpx_dsp_rtcd.h"
-#include "aom/vpx_integer.h"
+#include "./aom_config.h"
+#include "./aom_dsp_rtcd.h"
+#include "aom/aom_integer.h"
-void vpx_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
+void aom_half_horiz_vert_variance16x_h_sse2(const unsigned char *ref,
                                             int ref_stride,
                                             const unsigned char *src,
                                             int src_stride,
                                             unsigned int height, int *sum,
                                             unsigned int *sumsquared);
-void vpx_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
+void aom_half_horiz_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
                                        const unsigned char *src, int src_stride,
                                        unsigned int height, int *sum,
                                        unsigned int *sumsquared);
-void vpx_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
+void aom_half_vert_variance16x_h_sse2(const unsigned char *ref, int ref_stride,
                                       const unsigned char *src, int src_stride,
                                       unsigned int height, int *sum,
                                       unsigned int *sumsquared);
-uint32_t vpx_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_h_sse2(const unsigned char *src,
                                              int src_stride,
                                              const unsigned char *dst,
                                              int dst_stride, uint32_t *sse) {
  int xsum0;
  unsigned int xxsum0;
-  vpx_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
+  aom_half_horiz_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
                                    &xsum0, &xxsum0);
  *sse = xxsum0;
  return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8));
}
-uint32_t vpx_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_v_sse2(const unsigned char *src,
                                              int src_stride,
                                              const unsigned char *dst,
                                              int dst_stride, uint32_t *sse) {
  int xsum0;
  unsigned int xxsum0;
-  vpx_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
+  aom_half_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16, &xsum0,
                                   &xxsum0);
  *sse = xxsum0;
  return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8));
}
-uint32_t vpx_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
+uint32_t aom_variance_halfpixvar16x16_hv_sse2(const unsigned char *src,
                                               int src_stride,
                                               const unsigned char *dst,
                                               int dst_stride, uint32_t *sse) {
  int xsum0;
  unsigned int xxsum0;
-  vpx_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
+  aom_half_horiz_vert_variance16x_h_sse2(src, src_stride, dst, dst_stride, 16,
                                         &xsum0, &xxsum0);
  *sse = xxsum0;
diff --git a/aom_dsp/x86/highbd_loopfilter_sse2.c b/aom_dsp/x86/highbd_loopfilter_sse2.c
index f12bb5489a016cf6209edf0e50885f63c221332d..f55e8ce93a1bc00953069595d4d4e2a32ba7e0ad 100644
--- a/aom_dsp/x86/highbd_loopfilter_sse2.c
+++ b/aom_dsp/x86/highbd_loopfilter_sse2.c
@@ -11,7 +11,7 @@
 #include <emmintrin.h>  // SSE2
-#include "./vpx_dsp_rtcd.h"
+#include "./aom_dsp_rtcd.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/emmintrin_compat.h"
@@ -488,7 +488,7 @@ static void highbd_mb_lpf_horizontal_edge_w_sse2_16(uint16_t *s, int p,
 }
 // TODO(yunqingwang): remove count and call these 2 functions(8 or 16) directly.
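For reference, the aom_variance_halfpixvar16x16_* wrappers above all reduce to the same identity: the asm kernels return the sum (xsum0) and sum of squares (xxsum0) of the 256 pixel differences in a 16x16 block, and the wrapper subtracts sum*sum/256 (the ">> 8") to obtain the variance. The standalone C sketch below spells that identity out; it is illustrative only, and block_variance_16x16 plus the sample numbers are hypothetical, not part of this patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: variance of a 16x16 block (256 samples) from the
 * sum of differences and the sum of squared differences, i.e.
 * var = sse - sum*sum / 256, which the wrappers above express as
 * xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8). */
static uint32_t block_variance_16x16(uint32_t sse, int sum) {
  return sse - (((uint32_t)sum * sum) >> 8); /* 8 == log2(16 * 16) */
}

int main(void) {
  /* hypothetical inputs: sum = 512, sse = 5000 -> 5000 - 1024 = 3976 */
  printf("%u\n", block_variance_16x16(5000, 512));
  return 0;
}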
-void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p, +void aom_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p, const uint8_t *_blimit, const uint8_t *_limit, const uint8_t *_thresh, int count, @@ -499,7 +499,7 @@ void vpx_highbd_lpf_horizontal_16_sse2(uint16_t *s, int p, highbd_mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh, bd); } -void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, +void aom_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, const uint8_t *_blimit, const uint8_t *_limit, const uint8_t *_thresh, int count, @@ -659,7 +659,7 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, filt = _mm_adds_epi16(filt, work_a); filt = _mm_adds_epi16(filt, work_a); filt = _mm_adds_epi16(filt, work_a); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = signed_char_clamp_bd_sse2(filt, bd); filt = _mm_and_si128(filt, mask); @@ -728,16 +728,16 @@ void vpx_highbd_lpf_horizontal_8_sse2(uint16_t *s, int p, _mm_store_si128((__m128i *)(s + 2 * p), q2); } -void vpx_highbd_lpf_horizontal_8_dual_sse2( +void aom_highbd_lpf_horizontal_8_dual_sse2( uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0, const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1, const uint8_t *_thresh1, int bd) { - vpx_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd); - vpx_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1, + aom_highbd_lpf_horizontal_8_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd); + aom_highbd_lpf_horizontal_8_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1, bd); } -void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, +void aom_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, const uint8_t *_blimit, const uint8_t *_limit, const uint8_t *_thresh, int count, @@ -857,7 +857,7 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, filt = _mm_adds_epi16(filt, work_a); filt = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, work_a), bd); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = signed_char_clamp_bd_sse2(_mm_adds_epi16(filt, t4), bd); @@ -902,12 +902,12 @@ void vpx_highbd_lpf_horizontal_4_sse2(uint16_t *s, int p, _mm_storeu_si128((__m128i *)(s + 1 * p), q1); } -void vpx_highbd_lpf_horizontal_4_dual_sse2( +void aom_highbd_lpf_horizontal_4_dual_sse2( uint16_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0, const uint8_t *_thresh0, const uint8_t *_blimit1, const uint8_t *_limit1, const uint8_t *_thresh1, int bd) { - vpx_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd); - vpx_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1, + aom_highbd_lpf_horizontal_4_sse2(s, p, _blimit0, _limit0, _thresh0, 1, bd); + aom_highbd_lpf_horizontal_4_sse2(s + 8, p, _blimit1, _limit1, _thresh1, 1, bd); } @@ -1023,7 +1023,7 @@ static INLINE void highbd_transpose8x16(uint16_t *in0, uint16_t *in1, int in_p, highbd_transpose(src1, in_p, dest1, out_p, 1); } -void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit, +void aom_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]); @@ -1038,7 +1038,7 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit, highbd_transpose(src, p, dst, 8, 1); // Loop filtering - vpx_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, 
thresh, 1, + aom_highbd_lpf_horizontal_4_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1, bd); src[0] = t_dst; @@ -1048,7 +1048,7 @@ void vpx_highbd_lpf_vertical_4_sse2(uint16_t *s, int p, const uint8_t *blimit, highbd_transpose(src, 8, dst, p, 1); } -void vpx_highbd_lpf_vertical_4_dual_sse2( +void aom_highbd_lpf_vertical_4_dual_sse2( uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd) { @@ -1060,7 +1060,7 @@ void vpx_highbd_lpf_vertical_4_dual_sse2( highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16); // Loop filtering - vpx_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, + aom_highbd_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, blimit1, limit1, thresh1, bd); src[0] = t_dst; src[1] = t_dst + 8; @@ -1071,7 +1071,7 @@ void vpx_highbd_lpf_vertical_4_dual_sse2( highbd_transpose(src, 16, dst, p, 2); } -void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit, +void aom_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int count, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 8]); @@ -1086,7 +1086,7 @@ void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit, highbd_transpose(src, p, dst, 8, 1); // Loop filtering - vpx_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1, + aom_highbd_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1, bd); src[0] = t_dst; @@ -1096,7 +1096,7 @@ void vpx_highbd_lpf_vertical_8_sse2(uint16_t *s, int p, const uint8_t *blimit, highbd_transpose(src, 8, dst, p, 1); } -void vpx_highbd_lpf_vertical_8_dual_sse2( +void aom_highbd_lpf_vertical_8_dual_sse2( uint16_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd) { @@ -1108,7 +1108,7 @@ void vpx_highbd_lpf_vertical_8_dual_sse2( highbd_transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16); // Loop filtering - vpx_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, + aom_highbd_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, blimit1, limit1, thresh1, bd); src[0] = t_dst; src[1] = t_dst + 8; @@ -1120,7 +1120,7 @@ void vpx_highbd_lpf_vertical_8_dual_sse2( highbd_transpose(src, 16, dst, p, 2); } -void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit, +void aom_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd) { DECLARE_ALIGNED(16, uint16_t, t_dst[8 * 16]); @@ -1147,7 +1147,7 @@ void vpx_highbd_lpf_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit, highbd_transpose(src, 8, dst, p, 2); } -void vpx_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p, +void aom_highbd_lpf_vertical_16_dual_sse2(uint16_t *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd) { diff --git a/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/aom_dsp/x86/highbd_quantize_intrin_sse2.c index d7d13aa58913343c73a3ff6f76bfee122b968513..975dde788b24e961387f7dcbe6e07d0b50895ad2 100644 --- a/aom_dsp/x86/highbd_quantize_intrin_sse2.c +++ b/aom_dsp/x86/highbd_quantize_intrin_sse2.c @@ -11,12 +11,12 @@ #include -#include "aom_dsp/vpx_dsp_common.h" -#include "aom_mem/vpx_mem.h" +#include "aom_dsp/aom_dsp_common.h" +#include "aom_mem/aom_mem.h" #include "aom_ports/mem.h" #if 
CONFIG_VPX_HIGHBITDEPTH -void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count, +void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, @@ -92,7 +92,7 @@ void vpx_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count, *eob_ptr = eob_i + 1; } -void vpx_highbd_quantize_b_32x32_sse2( +void aom_highbd_quantize_b_32x32_sse2( const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, diff --git a/aom_dsp/x86/highbd_sad4d_sse2.asm b/aom_dsp/x86/highbd_sad4d_sse2.asm index 6c2a61e0197ea2d4e9b3ba99b4b052632bc1acbd..54501d1fe17f7311c4331ca013811404424cea71 100644 --- a/aom_dsp/x86/highbd_sad4d_sse2.asm +++ b/aom_dsp/x86/highbd_sad4d_sse2.asm @@ -209,7 +209,7 @@ SECTION .text HIGH_PROCESS_32x2x4 0, %4, %5, (%4 + 32), (%5 + 32), %6 %endmacro -; void vpx_highbd_sadNxNx4d_sse2(uint8_t *src, int src_stride, +; void aom_highbd_sadNxNx4d_sse2(uint8_t *src, int src_stride, ; uint8_t *ref[4], int ref_stride, ; uint32_t res[4]); ; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16 or 8x8 diff --git a/aom_dsp/x86/highbd_sad_sse2.asm b/aom_dsp/x86/highbd_sad_sse2.asm index bc4b28db24fb260bdc0a0df9f56ab885c3b59446..2da8c83d87b0f17c0a55a4339eabbb9dcad74d7d 100644 --- a/aom_dsp/x86/highbd_sad_sse2.asm +++ b/aom_dsp/x86/highbd_sad_sse2.asm @@ -50,7 +50,7 @@ cglobal highbd_sad%1x%2_avg, 5, ARCH_X86_64 + %3, 7, src, src_stride, \ %endif %endmacro -; unsigned int vpx_highbd_sad64x{16,32,64}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_highbd_sad64x{16,32,64}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro HIGH_SAD64XN 1-2 0 HIGH_SAD_FN 64, %1, 5, %2 @@ -157,7 +157,7 @@ HIGH_SAD64XN 64, 1 ; highbd_sad64x64_avg_sse2 HIGH_SAD64XN 32, 1 ; highbd_sad64x32_avg_sse2 -; unsigned int vpx_highbd_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_highbd_sad32x{16,32,64}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro HIGH_SAD32XN 1-2 0 HIGH_SAD_FN 32, %1, 5, %2 @@ -225,7 +225,7 @@ HIGH_SAD32XN 64, 1 ; highbd_sad32x64_avg_sse2 HIGH_SAD32XN 32, 1 ; highbd_sad32x32_avg_sse2 HIGH_SAD32XN 16, 1 ; highbd_sad32x16_avg_sse2 -; unsigned int vpx_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_highbd_sad16x{8,16,32}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro HIGH_SAD16XN 1-2 0 HIGH_SAD_FN 16, %1, 5, %2 @@ -294,7 +294,7 @@ HIGH_SAD16XN 16, 1 ; highbd_sad16x16_avg_sse2 HIGH_SAD16XN 8, 1 ; highbd_sad16x8_avg_sse2 -; unsigned int vpx_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_highbd_sad8x{4,8,16}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro HIGH_SAD8XN 1-2 0 HIGH_SAD_FN 8, %1, 7, %2 diff --git a/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm b/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm index 30ee81b68831c2bdeb90c51bb3e24b56b66524e5..1175742f9c75cb29d6c8480b65ddc82afdecb639 100644 --- a/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm +++ b/aom_dsp/x86/highbd_subpel_variance_impl_sse2.asm @@ -30,7 +30,7 @@ bilin_filter_m_sse2: times 8 dw 16 SECTION .text -; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride, +; int aom_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride, ; int x_offset, int y_offset, ; const uint8_t *dst, 
ptrdiff_t dst_stride, ; int height, unsigned int *sse); diff --git a/aom_dsp/x86/highbd_variance_impl_sse2.asm b/aom_dsp/x86/highbd_variance_impl_sse2.asm index 1bf3abbf34f6ae0dd5d18b2df8ef235090803950..3abb44f88194460eb102b6dcc13e7a8e9f57e4cd 100644 --- a/aom_dsp/x86/highbd_variance_impl_sse2.asm +++ b/aom_dsp/x86/highbd_variance_impl_sse2.asm @@ -11,7 +11,7 @@ %include "aom_ports/x86_abi_support.asm" -;unsigned int vpx_highbd_calc16x16var_sse2 +;unsigned int aom_highbd_calc16x16var_sse2 ;( ; unsigned char * src_ptr, ; int source_stride, @@ -20,8 +20,8 @@ ; unsigned int * SSE, ; int * Sum ;) -global sym(vpx_highbd_calc16x16var_sse2) PRIVATE -sym(vpx_highbd_calc16x16var_sse2): +global sym(aom_highbd_calc16x16var_sse2) PRIVATE +sym(aom_highbd_calc16x16var_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -164,7 +164,7 @@ sym(vpx_highbd_calc16x16var_sse2): ret -;unsigned int vpx_highbd_calc8x8var_sse2 +;unsigned int aom_highbd_calc8x8var_sse2 ;( ; unsigned char * src_ptr, ; int source_stride, @@ -173,8 +173,8 @@ sym(vpx_highbd_calc16x16var_sse2): ; unsigned int * SSE, ; int * Sum ;) -global sym(vpx_highbd_calc8x8var_sse2) PRIVATE -sym(vpx_highbd_calc8x8var_sse2): +global sym(aom_highbd_calc8x8var_sse2) PRIVATE +sym(aom_highbd_calc8x8var_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 diff --git a/aom_dsp/x86/highbd_variance_sse2.c b/aom_dsp/x86/highbd_variance_sse2.c index 10684eb7cde30d9461c13b1f2dd5312304239897..2d4bd3d1bce0206f902e3bca21f8aa8e411eab6c 100644 --- a/aom_dsp/x86/highbd_variance_sse2.c +++ b/aom_dsp/x86/highbd_variance_sse2.c @@ -8,7 +8,7 @@ * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_config.h" +#include "./aom_config.h" #include "aom_ports/mem.h" @@ -16,11 +16,11 @@ typedef uint32_t (*high_variance_fn_t)(const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride, uint32_t *sse, int *sum); -uint32_t vpx_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride, +uint32_t aom_highbd_calc8x8var_sse2(const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride, uint32_t *sse, int *sum); -uint32_t vpx_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride, +uint32_t aom_highbd_calc16x16var_sse2(const uint16_t *src, int src_stride, const uint16_t *ref, int ref_stride, uint32_t *sse, int *sum); @@ -90,32 +90,32 @@ static void highbd_12_variance_sse2(const uint16_t *src, int src_stride, } #define HIGH_GET_VAR(S) \ - void vpx_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \ + void aom_highbd_get##S##x##S##var_sse2(const uint8_t *src8, int src_stride, \ const uint8_t *ref8, int ref_stride, \ uint32_t *sse, int *sum) { \ uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ + aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ sum); \ } \ \ - void vpx_highbd_10_get##S##x##S##var_sse2( \ + void aom_highbd_10_get##S##x##S##var_sse2( \ const uint8_t *src8, int src_stride, const uint8_t *ref8, \ int ref_stride, uint32_t *sse, int *sum) { \ uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ + aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ sum); \ *sum = ROUND_POWER_OF_TWO(*sum, 2); \ *sse = ROUND_POWER_OF_TWO(*sse, 4); \ } \ \ - 
void vpx_highbd_12_get##S##x##S##var_sse2( \ + void aom_highbd_12_get##S##x##S##var_sse2( \ const uint8_t *src8, int src_stride, const uint8_t *ref8, \ int ref_stride, uint32_t *sse, int *sum) { \ uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ - vpx_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ + aom_highbd_calc##S##x##S##var_sse2(src, src_stride, ref, ref_stride, sse, \ sum); \ *sum = ROUND_POWER_OF_TWO(*sum, 4); \ *sse = ROUND_POWER_OF_TWO(*sse, 8); \ @@ -129,7 +129,7 @@ HIGH_GET_VAR(8) #undef HIGH_GET_VAR #define VAR_FN(w, h, block_size, shift) \ - uint32_t vpx_highbd_8_variance##w##x##h##_sse2( \ + uint32_t aom_highbd_8_variance##w##x##h##_sse2( \ const uint8_t *src8, int src_stride, const uint8_t *ref8, \ int ref_stride, uint32_t *sse) { \ int sum; \ @@ -137,11 +137,11 @@ HIGH_GET_VAR(8) uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ highbd_8_variance_sse2( \ src, src_stride, ref, ref_stride, w, h, sse, &sum, \ - vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ + aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ return *sse - (((int64_t)sum * sum) >> shift); \ } \ \ - uint32_t vpx_highbd_10_variance##w##x##h##_sse2( \ + uint32_t aom_highbd_10_variance##w##x##h##_sse2( \ const uint8_t *src8, int src_stride, const uint8_t *ref8, \ int ref_stride, uint32_t *sse) { \ int sum; \ @@ -149,11 +149,11 @@ HIGH_GET_VAR(8) uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ highbd_10_variance_sse2( \ src, src_stride, ref, ref_stride, w, h, sse, &sum, \ - vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ + aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ return *sse - (((int64_t)sum * sum) >> shift); \ } \ \ - uint32_t vpx_highbd_12_variance##w##x##h##_sse2( \ + uint32_t aom_highbd_12_variance##w##x##h##_sse2( \ const uint8_t *src8, int src_stride, const uint8_t *ref8, \ int ref_stride, uint32_t *sse) { \ int sum; \ @@ -161,7 +161,7 @@ HIGH_GET_VAR(8) uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); \ highbd_12_variance_sse2( \ src, src_stride, ref, ref_stride, w, h, sse, &sum, \ - vpx_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ + aom_highbd_calc##block_size##x##block_size##var_sse2, block_size); \ return *sse - (((int64_t)sum * sum) >> shift); \ } @@ -180,69 +180,69 @@ VAR_FN(8, 8, 8, 6) #undef VAR_FN -unsigned int vpx_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride, +unsigned int aom_highbd_8_mse16x16_sse2(const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, - vpx_highbd_calc16x16var_sse2, 16); + aom_highbd_calc16x16var_sse2, 16); return *sse; } -unsigned int vpx_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride, +unsigned int aom_highbd_10_mse16x16_sse2(const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, - vpx_highbd_calc16x16var_sse2, 16); + aom_highbd_calc16x16var_sse2, 16); return *sse; } -unsigned int vpx_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride, +unsigned int aom_highbd_12_mse16x16_sse2(const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, unsigned int *sse) { 
int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, - vpx_highbd_calc16x16var_sse2, 16); + aom_highbd_calc16x16var_sse2, 16); return *sse; } -unsigned int vpx_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride, +unsigned int aom_highbd_8_mse8x8_sse2(const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); highbd_8_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum, - vpx_highbd_calc8x8var_sse2, 8); + aom_highbd_calc8x8var_sse2, 8); return *sse; } -unsigned int vpx_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride, +unsigned int aom_highbd_10_mse8x8_sse2(const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); highbd_10_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum, - vpx_highbd_calc8x8var_sse2, 8); + aom_highbd_calc8x8var_sse2, 8); return *sse; } -unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride, +unsigned int aom_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride, const uint8_t *ref8, int ref_stride, unsigned int *sse) { int sum; uint16_t *src = CONVERT_TO_SHORTPTR(src8); uint16_t *ref = CONVERT_TO_SHORTPTR(ref8); highbd_12_variance_sse2(src, src_stride, ref, ref_stride, 8, 8, sse, &sum, - vpx_highbd_calc8x8var_sse2, 8); + aom_highbd_calc8x8var_sse2, 8); return *sse; } @@ -251,7 +251,7 @@ unsigned int vpx_highbd_12_mse8x8_sse2(const uint8_t *src8, int src_stride, // These definitions are for functions defined in // highbd_subpel_variance_impl_sse2.asm #define DECL(w, opt) \ - int vpx_highbd_sub_pixel_variance##w##xh_##opt( \ + int aom_highbd_sub_pixel_variance##w##xh_##opt( \ const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \ const uint16_t *dst, ptrdiff_t dst_stride, int height, \ unsigned int *sse, void *unused0, void *unused); @@ -266,29 +266,29 @@ DECLS(sse2, sse); #undef DECL #define FN(w, h, wf, wlog2, hlog2, opt, cast) \ - uint32_t vpx_highbd_8_sub_pixel_variance##w##x##h##_##opt( \ + uint32_t aom_highbd_8_sub_pixel_variance##w##x##h##_##opt( \ const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ uint32_t sse; \ uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + int se = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \ NULL); \ if (w > wf) { \ unsigned int sse2; \ - int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \ &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \ &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \ &sse2, NULL, NULL); \ se += se2; \ @@ -299,29 
+299,29 @@ DECLS(sse2, sse); return sse - ((cast se * se) >> (wlog2 + hlog2)); \ } \ \ - uint32_t vpx_highbd_10_sub_pixel_variance##w##x##h##_##opt( \ + uint32_t aom_highbd_10_sub_pixel_variance##w##x##h##_##opt( \ const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ uint32_t sse; \ uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ - int se = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + int se = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src, src_stride, x_offset, y_offset, dst, dst_stride, h, &sse, NULL, \ NULL); \ if (w > wf) { \ uint32_t sse2; \ - int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, h, \ &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, h, \ &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, h, \ &sse2, NULL, NULL); \ se += se2; \ @@ -334,7 +334,7 @@ DECLS(sse2, sse); return sse - ((cast se * se) >> (wlog2 + hlog2)); \ } \ \ - uint32_t vpx_highbd_12_sub_pixel_variance##w##x##h##_##opt( \ + uint32_t aom_highbd_12_sub_pixel_variance##w##x##h##_##opt( \ const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr) { \ int start_row; \ @@ -346,27 +346,27 @@ DECLS(sse2, sse); for (start_row = 0; start_row < h; start_row += 16) { \ uint32_t sse2; \ int height = h - start_row < 16 ? h - start_row : 16; \ - int se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + int se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + (start_row * src_stride), src_stride, x_offset, y_offset, \ dst + (start_row * dst_stride), dst_stride, height, &sse2, NULL, \ NULL); \ se += se2; \ long_sse += sse2; \ if (w > wf) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 16 + (start_row * src_stride), src_stride, x_offset, \ y_offset, dst + 16 + (start_row * dst_stride), dst_stride, height, \ &sse2, NULL, NULL); \ se += se2; \ long_sse += sse2; \ if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 32 + (start_row * src_stride), src_stride, x_offset, \ y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \ height, &sse2, NULL, NULL); \ se += se2; \ long_sse += sse2; \ - se2 = vpx_highbd_sub_pixel_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_variance##wf##xh_##opt( \ src + 48 + (start_row * src_stride), src_stride, x_offset, \ y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \ height, &sse2, NULL, NULL); \ @@ -401,7 +401,7 @@ FNS(sse2, sse); // The 2 unused parameters are place holders for PIC enabled build. 
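The FN(...) macros above handle blocks wider than the 16-column asm kernel by calling it again at column offsets 16, 32 and 48 and summing the partial results, and the 12-bit variants additionally walk the block 16 rows at a time while accumulating the SSE into a 64-bit value so it cannot overflow. The short C sketch below shows only that tiling/accumulation pattern; kernel_16xh and wide_variance_sums are hypothetical names introduced for illustration, not functions from this patch.

#include <stdint.h>

/* Hypothetical 16-column kernel signature standing in for the asm helpers. */
typedef void (*kernel_16xh_fn)(const uint16_t *src, int src_stride,
                               const uint16_t *dst, int dst_stride,
                               int height, int *se, uint32_t *sse);

/* Cover a w-wide, h-tall block with 16-wide kernel calls and sum the
 * partial sums of error (se) and sums of squared error (sse). */
static void wide_variance_sums(const uint16_t *src, int src_stride,
                               const uint16_t *dst, int dst_stride, int w,
                               int h, kernel_16xh_fn kernel_16xh,
                               int *se_total, uint64_t *sse_total) {
  int col;
  *se_total = 0;
  *sse_total = 0;
  for (col = 0; col < w; col += 16) { /* col = 0, 16, 32, 48 when w == 64 */
    int se;
    uint32_t sse;
    kernel_16xh(src + col, src_stride, dst + col, dst_stride, h, &se, &sse);
    *se_total += se;
    *sse_total += sse; /* 64-bit accumulator, as the 12-bit path requires */
  }
}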
#define DECL(w, opt) \ - int vpx_highbd_sub_pixel_avg_variance##w##xh_##opt( \ + int aom_highbd_sub_pixel_avg_variance##w##xh_##opt( \ const uint16_t *src, ptrdiff_t src_stride, int x_offset, int y_offset, \ const uint16_t *dst, ptrdiff_t dst_stride, const uint16_t *sec, \ ptrdiff_t sec_stride, int height, unsigned int *sse, void *unused0, \ @@ -415,7 +415,7 @@ DECLS(sse2); #undef DECLS #define FN(w, h, wf, wlog2, hlog2, opt, cast) \ - uint32_t vpx_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \ + uint32_t aom_highbd_8_sub_pixel_avg_variance##w##x##h##_##opt( \ const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ const uint8_t *sec8) { \ @@ -423,23 +423,23 @@ DECLS(sse2); uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ - int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + int se = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \ NULL, NULL); \ if (w > wf) { \ uint32_t sse2; \ - int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \ sec + 16, w, h, &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \ sec + 32, w, h, &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \ sec + 48, w, h, &sse2, NULL, NULL); \ se += se2; \ @@ -450,7 +450,7 @@ DECLS(sse2); return sse - ((cast se * se) >> (wlog2 + hlog2)); \ } \ \ - uint32_t vpx_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \ + uint32_t aom_highbd_10_sub_pixel_avg_variance##w##x##h##_##opt( \ const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ const uint8_t *sec8) { \ @@ -458,23 +458,23 @@ DECLS(sse2); uint16_t *src = CONVERT_TO_SHORTPTR(src8); \ uint16_t *dst = CONVERT_TO_SHORTPTR(dst8); \ uint16_t *sec = CONVERT_TO_SHORTPTR(sec8); \ - int se = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + int se = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src, src_stride, x_offset, y_offset, dst, dst_stride, sec, w, h, &sse, \ NULL, NULL); \ if (w > wf) { \ uint32_t sse2; \ - int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 16, src_stride, x_offset, y_offset, dst + 16, dst_stride, \ sec + 16, w, h, &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, \ sec + 32, w, h, &sse2, NULL, NULL); \ se += se2; \ sse += sse2; \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 48, src_stride, x_offset, y_offset, dst + 48, dst_stride, \ sec + 48, w, h, &sse2, NULL, NULL); \ se += se2; \ @@ -487,7 +487,7 @@ DECLS(sse2); return sse - ((cast se * se) >> (wlog2 + hlog2)); \ } \ \ - 
uint32_t vpx_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \ + uint32_t aom_highbd_12_sub_pixel_avg_variance##w##x##h##_##opt( \ const uint8_t *src8, int src_stride, int x_offset, int y_offset, \ const uint8_t *dst8, int dst_stride, uint32_t *sse_ptr, \ const uint8_t *sec8) { \ @@ -501,27 +501,27 @@ DECLS(sse2); for (start_row = 0; start_row < h; start_row += 16) { \ uint32_t sse2; \ int height = h - start_row < 16 ? h - start_row : 16; \ - int se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + int se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + (start_row * src_stride), src_stride, x_offset, y_offset, \ dst + (start_row * dst_stride), dst_stride, sec + (start_row * w), \ w, height, &sse2, NULL, NULL); \ se += se2; \ long_sse += sse2; \ if (w > wf) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 16 + (start_row * src_stride), src_stride, x_offset, \ y_offset, dst + 16 + (start_row * dst_stride), dst_stride, \ sec + 16 + (start_row * w), w, height, &sse2, NULL, NULL); \ se += se2; \ long_sse += sse2; \ if (w > wf * 2) { \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 32 + (start_row * src_stride), src_stride, x_offset, \ y_offset, dst + 32 + (start_row * dst_stride), dst_stride, \ sec + 32 + (start_row * w), w, height, &sse2, NULL, NULL); \ se += se2; \ long_sse += sse2; \ - se2 = vpx_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ + se2 = aom_highbd_sub_pixel_avg_variance##wf##xh_##opt( \ src + 48 + (start_row * src_stride), src_stride, x_offset, \ y_offset, dst + 48 + (start_row * dst_stride), dst_stride, \ sec + 48 + (start_row * w), w, height, &sse2, NULL, NULL); \ diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c index 1fa7051a178d315b6b94cbc35dc000de2afbd845..1a8359fe024acc10f05da7ee2005e761d3c01765 100644 --- a/aom_dsp/x86/inv_txfm_sse2.c +++ b/aom_dsp/x86/inv_txfm_sse2.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_dsp/x86/inv_txfm_sse2.h" #include "aom_dsp/x86/txfm_common_sse2.h" @@ -22,7 +22,7 @@ *(int *)(dest) = _mm_cvtsi128_si32(d0); \ } -void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i zero = _mm_setzero_si128(); const __m128i eight = _mm_set1_epi16(8); @@ -153,7 +153,7 @@ void vpx_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct4x4_1_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { __m128i dc_value; const __m128i zero = _mm_setzero_si128(); @@ -449,7 +449,7 @@ void iadst4_sse2(__m128i *in) { out7 = _mm_subs_epi16(stp1_0, stp2_7); \ } -void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i zero = _mm_setzero_si128(); const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); @@ -481,7 +481,7 @@ void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, // 2-D for (i = 0; i < 2; i++) { - // 8x8 Transpose is copied from vpx_fdct8x8_sse2() + // 8x8 Transpose is copied from aom_fdct8x8_sse2() TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5, in6, in7); @@ -519,7 +519,7 @@ void vpx_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, RECON_AND_STORE(dest + 7 * stride, in7); } -void vpx_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct8x8_1_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { __m128i dc_value; const __m128i zero = _mm_setzero_si128(); @@ -557,7 +557,7 @@ void idct8_sse2(__m128i *in) { __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7; __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; - // 8x8 Transpose is copied from vpx_fdct8x8_sse2() + // 8x8 Transpose is copied from aom_fdct8x8_sse2() TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0, in1, in2, in3, in4, in5, in6, in7); @@ -794,7 +794,7 @@ void iadst8_sse2(__m128i *in) { in[7] = _mm_sub_epi16(k__const_0, s1); } -void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i zero = _mm_setzero_si128(); const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); @@ -1164,7 +1164,7 @@ void vpx_idct8x8_12_add_sse2(const tran_low_t *input, uint8_t *dest, stp2_12) \ } -void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); const __m128i final_rounding = _mm_set1_epi16(1 << 5); @@ -1289,7 +1289,7 @@ void vpx_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct16x16_1_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { __m128i dc_value; const __m128i zero = _mm_setzero_si128(); @@ -2148,7 +2148,7 @@ void iadst16_sse2(__m128i *in0, __m128i *in1) { iadst16_8col(in1); } -void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); const __m128i final_rounding = 
_mm_set1_epi16(1 << 5); @@ -3022,7 +3022,7 @@ void vpx_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest, } // Only upper-left 8x8 has non-zero coeff -void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); const __m128i final_rounding = _mm_set1_epi16(1 << 5); @@ -3179,7 +3179,7 @@ void vpx_idct32x32_34_add_sse2(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING); const __m128i final_rounding = _mm_set1_epi16(1 << 5); @@ -3453,7 +3453,7 @@ void vpx_idct32x32_1024_add_sse2(const tran_low_t *input, uint8_t *dest, } } -void vpx_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest, +void aom_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest, int stride) { __m128i dc_value; const __m128i zero = _mm_setzero_si128(); @@ -3487,7 +3487,7 @@ static INLINE __m128i clamp_high_sse2(__m128i value, int bd) { return retval; } -void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[4 * 4]; tran_low_t *outptr = out; @@ -3550,7 +3550,7 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, } else { // Run the un-optimised row transform for (i = 0; i < 4; ++i) { - vpx_highbd_idct4_c(input, outptr, bd); + aom_highbd_idct4_c(input, outptr, bd); input += 4; outptr += 4; } @@ -3593,7 +3593,7 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, // Columns for (i = 0; i < 4; ++i) { for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i]; - vpx_highbd_idct4_c(temp_in, temp_out, bd); + aom_highbd_idct4_c(temp_in, temp_out, bd); for (j = 0; j < 4; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd); @@ -3602,7 +3602,7 @@ void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[8 * 8]; tran_low_t *outptr = out; @@ -3667,7 +3667,7 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, } else { // Run the un-optimised row transform for (i = 0; i < 8; ++i) { - vpx_highbd_idct8_c(input, outptr, bd); + aom_highbd_idct8_c(input, outptr, bd); input += 8; outptr += 8; } @@ -3693,7 +3693,7 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, tran_low_t temp_in[8], temp_out[8]; for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - vpx_highbd_idct8_c(temp_in, temp_out, bd); + aom_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); @@ -3702,7 +3702,7 @@ void vpx_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[8 * 8] = { 0 }; tran_low_t *outptr = out; @@ -3770,7 
+3770,7 @@ void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, } else { // Run the un-optimised row transform for (i = 0; i < 4; ++i) { - vpx_highbd_idct8_c(input, outptr, bd); + aom_highbd_idct8_c(input, outptr, bd); input += 8; outptr += 8; } @@ -3796,7 +3796,7 @@ void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, tran_low_t temp_in[8], temp_out[8]; for (i = 0; i < 8; ++i) { for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i]; - vpx_highbd_idct8_c(temp_in, temp_out, bd); + aom_highbd_idct8_c(temp_in, temp_out, bd); for (j = 0; j < 8; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd); @@ -3805,7 +3805,7 @@ void vpx_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[16 * 16]; tran_low_t *outptr = out; @@ -3878,7 +3878,7 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8, } else { // Run the un-optimised row transform for (i = 0; i < 16; ++i) { - vpx_highbd_idct16_c(input, outptr, bd); + aom_highbd_idct16_c(input, outptr, bd); input += 16; outptr += 16; } @@ -3909,7 +3909,7 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8, tran_low_t temp_in[16], temp_out[16]; for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; - vpx_highbd_idct16_c(temp_in, temp_out, bd); + aom_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); @@ -3918,7 +3918,7 @@ void vpx_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8, } } -void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, +void aom_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, int stride, int bd) { tran_low_t out[16 * 16] = { 0 }; tran_low_t *outptr = out; @@ -3996,7 +3996,7 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, } else { // Run the un-optimised row transform for (i = 0; i < 4; ++i) { - vpx_highbd_idct16_c(input, outptr, bd); + aom_highbd_idct16_c(input, outptr, bd); input += 16; outptr += 16; } @@ -4027,7 +4027,7 @@ void vpx_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8, tran_low_t temp_in[16], temp_out[16]; for (i = 0; i < 16; ++i) { for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i]; - vpx_highbd_idct16_c(temp_in, temp_out, bd); + aom_highbd_idct16_c(temp_in, temp_out, bd); for (j = 0; j < 16; ++j) { dest[j * stride + i] = highbd_clip_pixel_add( dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd); diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h index 1ed4c7b609ddd570029a63d55d368cb6b6cce91e..ddb680fc9de7c7f37c786890ff1dbe754c329c08 100644 --- a/aom_dsp/x86/inv_txfm_sse2.h +++ b/aom_dsp/x86/inv_txfm_sse2.h @@ -13,8 +13,8 @@ #define VPX_DSP_X86_INV_TXFM_SSE2_H_ #include // SSE2 -#include "./vpx_config.h" -#include "aom/vpx_integer.h" +#include "./aom_config.h" +#include "aom/aom_integer.h" #include "aom_dsp/inv_txfm.h" #include "aom_dsp/x86/txfm_common_sse2.h" diff --git a/aom_dsp/x86/loopfilter_avx2.c b/aom_dsp/x86/loopfilter_avx2.c index 0ae728ac5299ec00dc9bad7292e330b367ca9822..53990a84a76437bd41e6a22973809152c9e7767c 100644 --- 
a/aom_dsp/x86/loopfilter_avx2.c +++ b/aom_dsp/x86/loopfilter_avx2.c @@ -11,7 +11,7 @@ #include /* AVX2 */ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" static void mb_lpf_horizontal_edge_w_avx2_8(unsigned char *s, int p, @@ -102,7 +102,7 @@ static void mb_lpf_horizontal_edge_w_avx2_8(unsigned char *s, int p, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - /* (vpx_filter + 3 * (qs0 - ps0)) & mask */ + /* (aom_filter + 3 * (qs0 - ps0)) & mask */ filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -481,7 +481,7 @@ static void mb_lpf_horizontal_edge_w_avx2_16(unsigned char *s, int p, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - /* (vpx_filter + 3 * (qs0 - ps0)) & mask */ + /* (aom_filter + 3 * (qs0 - ps0)) & mask */ filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -913,7 +913,7 @@ static void mb_lpf_horizontal_edge_w_avx2_16(unsigned char *s, int p, } } -void vpx_lpf_horizontal_16_avx2(unsigned char *s, int p, +void aom_lpf_horizontal_16_avx2(unsigned char *s, int p, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh, int count) { diff --git a/aom_dsp/x86/loopfilter_mmx.asm b/aom_dsp/x86/loopfilter_mmx.asm index dd47c99f34df63bd399014b9a108e8efd7fccecf..23777391892d572c0145f0b3c0d942dc9fc5fedf 100644 --- a/aom_dsp/x86/loopfilter_mmx.asm +++ b/aom_dsp/x86/loopfilter_mmx.asm @@ -12,7 +12,7 @@ %include "aom_ports/x86_abi_support.asm" -;void vpx_lpf_horizontal_4_mmx +;void aom_lpf_horizontal_4_mmx ;( ; unsigned char *src_ptr, ; int src_pixel_step, @@ -21,8 +21,8 @@ ; const char *thresh, ; int count ;) -global sym(vpx_lpf_horizontal_4_mmx) PRIVATE -sym(vpx_lpf_horizontal_4_mmx): +global sym(aom_lpf_horizontal_4_mmx) PRIVATE +sym(aom_lpf_horizontal_4_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -224,7 +224,7 @@ sym(vpx_lpf_horizontal_4_mmx): ret -;void vpx_lpf_vertical_4_mmx +;void aom_lpf_vertical_4_mmx ;( ; unsigned char *src_ptr, ; int src_pixel_step, @@ -233,8 +233,8 @@ sym(vpx_lpf_horizontal_4_mmx): ; const char *thresh, ; int count ;) -global sym(vpx_lpf_vertical_4_mmx) PRIVATE -sym(vpx_lpf_vertical_4_mmx): +global sym(aom_lpf_vertical_4_mmx) PRIVATE +sym(aom_lpf_vertical_4_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 diff --git a/aom_dsp/x86/loopfilter_sse2.c b/aom_dsp/x86/loopfilter_sse2.c index dcccd92402cd925a8830a6a1879f3c8cc11fd35d..d77077b326b692f0a99b01ea1a7094a51edeaa9f 100644 --- a/aom_dsp/x86/loopfilter_sse2.c +++ b/aom_dsp/x86/loopfilter_sse2.c @@ -11,7 +11,7 @@ #include // SSE2 -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" #include "aom_ports/emmintrin_compat.h" @@ -99,7 +99,7 @@ static void mb_lpf_horizontal_edge_w_sse2_8(unsigned char *s, int p, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -492,7 +492,7 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, int p, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); filter2 = _mm_adds_epi8(filt, t3); 
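Both loop-filter files repeat the comment "(aom_filter + 3 * (qs0 - ps0)) & mask" next to the filt/filter1/filter2 computations; for orientation, here is a scalar sketch of that standard filter4-style update. It is an illustration, not code from this patch: pixel values are assumed to already be in signed (bias-removed) form, and hev/mask are assumed to be per-pixel masks of 0 or -1.

#include <stdint.h>

static int8_t clamp8(int v) { /* like signed_char_clamp() */
  return (int8_t)(v < -128 ? -128 : (v > 127 ? 127 : v));
}

/* ps1,ps0 | qs0,qs1 are the two signed pixels on each side of the edge. */
static void filter4_pixel(int8_t *ps1, int8_t *ps0, int8_t *qs0, int8_t *qs1,
                          int8_t hev, int8_t mask) {
  int8_t filt = clamp8(*ps1 - *qs1) & hev;        /* outer-tap term      */
  int8_t filter1, filter2;
  filt = clamp8(filt + 3 * (*qs0 - *ps0)) & mask; /* the commented step  */
  filter1 = clamp8(filt + 4) >> 3;                /* t4 in the SIMD code */
  filter2 = clamp8(filt + 3) >> 3;                /* t3 in the SIMD code */
  *qs0 = clamp8(*qs0 - filter1);
  *ps0 = clamp8(*ps0 + filter2);
}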
@@ -714,7 +714,7 @@ static void mb_lpf_horizontal_edge_w_sse2_16(unsigned char *s, int p, } // TODO(yunqingwang): remove count and call these 2 functions(8 or 16) directly. -void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p, +void aom_lpf_horizontal_16_sse2(unsigned char *s, int p, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh, int count) { @@ -724,7 +724,7 @@ void vpx_lpf_horizontal_16_sse2(unsigned char *s, int p, mb_lpf_horizontal_edge_w_sse2_16(s, p, _blimit, _limit, _thresh); } -void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, +void aom_lpf_horizontal_8_sse2(unsigned char *s, int p, const unsigned char *_blimit, const unsigned char *_limit, const unsigned char *_thresh, int count) { @@ -869,7 +869,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -938,7 +938,7 @@ void vpx_lpf_horizontal_8_sse2(unsigned char *s, int p, } } -void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0, +void aom_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0, const uint8_t *_limit0, const uint8_t *_thresh0, const uint8_t *_blimit1, @@ -1105,7 +1105,7 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -1180,7 +1180,7 @@ void vpx_lpf_horizontal_8_dual_sse2(uint8_t *s, int p, const uint8_t *_blimit0, } } -void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, +void aom_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, const unsigned char *_blimit0, const unsigned char *_limit0, const unsigned char *_thresh0, @@ -1274,7 +1274,7 @@ void vpx_lpf_horizontal_4_dual_sse2(unsigned char *s, int p, filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); filt = _mm_adds_epi8(filt, work_a); - // (vpx_filter + 3 * (qs0 - ps0)) & mask + // (aom_filter + 3 * (qs0 - ps0)) & mask filt = _mm_and_si128(filt, mask); filter1 = _mm_adds_epi8(filt, t4); @@ -1460,7 +1460,7 @@ static INLINE void transpose(unsigned char *src[], int in_p, } while (++idx8x8 < num_8x8_to_transpose); } -void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { @@ -1472,7 +1472,7 @@ void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16); // Loop filtering - vpx_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, + aom_lpf_horizontal_4_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, blimit1, limit1, thresh1); src[0] = t_dst; src[1] = t_dst + 8; @@ -1483,7 +1483,7 @@ void vpx_lpf_vertical_4_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, transpose(src, 16, dst, p, 2); } -void vpx_lpf_vertical_8_sse2(unsigned char *s, int p, +void aom_lpf_vertical_8_sse2(unsigned char *s, int p, const unsigned char *blimit, const unsigned char *limit, const unsigned char 
*thresh, int count) { @@ -1499,7 +1499,7 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p, transpose(src, p, dst, 8, 1); // Loop filtering - vpx_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1); + aom_lpf_horizontal_8_sse2(t_dst + 4 * 8, 8, blimit, limit, thresh, 1); src[0] = t_dst; dst[0] = s - 4; @@ -1508,7 +1508,7 @@ void vpx_lpf_vertical_8_sse2(unsigned char *s, int p, transpose(src, 8, dst, p, 1); } -void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, +void aom_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1) { @@ -1520,7 +1520,7 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, transpose8x16(s - 4, s - 4 + p * 8, p, t_dst, 16); // Loop filtering - vpx_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, + aom_lpf_horizontal_8_dual_sse2(t_dst + 4 * 16, 16, blimit0, limit0, thresh0, blimit1, limit1, thresh1); src[0] = t_dst; src[1] = t_dst + 8; @@ -1532,7 +1532,7 @@ void vpx_lpf_vertical_8_dual_sse2(uint8_t *s, int p, const uint8_t *blimit0, transpose(src, 16, dst, p, 2); } -void vpx_lpf_vertical_16_sse2(unsigned char *s, int p, +void aom_lpf_vertical_16_sse2(unsigned char *s, int p, const unsigned char *blimit, const unsigned char *limit, const unsigned char *thresh) { @@ -1560,7 +1560,7 @@ void vpx_lpf_vertical_16_sse2(unsigned char *s, int p, transpose(src, 8, dst, p, 2); } -void vpx_lpf_vertical_16_dual_sse2(unsigned char *s, int p, +void aom_lpf_vertical_16_dual_sse2(unsigned char *s, int p, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh) { DECLARE_ALIGNED(16, unsigned char, t_dst[256]); diff --git a/aom_dsp/x86/quantize_sse2.c b/aom_dsp/x86/quantize_sse2.c index 843290c335125a180b26491ee6071f22e1a36bfa..3a2655fe3cfc667e4cf0cb07349ca26f8d7fc0e1 100644 --- a/aom_dsp/x86/quantize_sse2.c +++ b/aom_dsp/x86/quantize_sse2.c @@ -12,8 +12,8 @@ #include #include -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" static INLINE __m128i load_coefficients(const tran_low_t* coeff_ptr) { #if CONFIG_VPX_HIGHBITDEPTH @@ -41,7 +41,7 @@ static INLINE void store_coefficients(__m128i coeff_vals, #endif } -void vpx_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs, +void aom_quantize_b_sse2(const tran_low_t* coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr, const int16_t* quant_ptr, const int16_t* quant_shift_ptr, tran_low_t* qcoeff_ptr, diff --git a/aom_dsp/x86/sad4d_avx2.c b/aom_dsp/x86/sad4d_avx2.c index 9e3207a9c23bbc0d7c317321d9f9cb28a9471322..686ee2436b448c5f89cb0fb619c6fbd176fb1ed5 100644 --- a/aom_dsp/x86/sad4d_avx2.c +++ b/aom_dsp/x86/sad4d_avx2.c @@ -9,10 +9,10 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. 
*/ #include // AVX2 -#include "./vpx_dsp_rtcd.h" -#include "aom/vpx_integer.h" +#include "./aom_dsp_rtcd.h" +#include "aom/aom_integer.h" -void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride, +void aom_sad32x32x4d_avx2(const uint8_t *src, int src_stride, const uint8_t *const ref[4], int ref_stride, uint32_t res[4]) { __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg; @@ -80,7 +80,7 @@ void vpx_sad32x32x4d_avx2(const uint8_t *src, int src_stride, } } -void vpx_sad64x64x4d_avx2(const uint8_t *src, int src_stride, +void aom_sad64x64x4d_avx2(const uint8_t *src, int src_stride, const uint8_t *const ref[4], int ref_stride, uint32_t res[4]) { __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg; diff --git a/aom_dsp/x86/sad4d_sse2.asm b/aom_dsp/x86/sad4d_sse2.asm index a2f0ae79e3a86d844cac3d37c95a84b4febebabb..cf0b6a6d7279cd08424e094ec3cc9c39f8936c22 100644 --- a/aom_dsp/x86/sad4d_sse2.asm +++ b/aom_dsp/x86/sad4d_sse2.asm @@ -167,7 +167,7 @@ SECTION .text PROCESS_32x2x4 0, %4, %5, %4 + 32, %5 + 32, %6 %endmacro -; void vpx_sadNxNx4d_sse2(uint8_t *src, int src_stride, +; void aom_sadNxNx4d_sse2(uint8_t *src, int src_stride, ; uint8_t *ref[4], int ref_stride, ; uint32_t res[4]); ; where NxN = 64x64, 32x32, 16x16, 16x8, 8x16 or 8x8 diff --git a/aom_dsp/x86/sad_avx2.c b/aom_dsp/x86/sad_avx2.c index cf5143965deeb29b5eba88578a76a0ea3232ffa7..6ce61fd7f8b13ad8858600efd93a1258278709ab 100644 --- a/aom_dsp/x86/sad_avx2.c +++ b/aom_dsp/x86/sad_avx2.c @@ -9,11 +9,11 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ #include -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" #define FSAD64_H(h) \ - unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride, \ + unsigned int aom_sad64x##h##_avx2(const uint8_t *src_ptr, int src_stride, \ const uint8_t *ref_ptr, int ref_stride) { \ int i, res; \ __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ @@ -41,7 +41,7 @@ } #define FSAD32_H(h) \ - unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride, \ + unsigned int aom_sad32x##h##_avx2(const uint8_t *src_ptr, int src_stride, \ const uint8_t *ref_ptr, int ref_stride) { \ int i, res; \ __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \ @@ -92,7 +92,7 @@ FSAD32 #undef FSAD32_H #define FSADAVG64_H(h) \ - unsigned int vpx_sad64x##h##_avg_avx2( \ + unsigned int aom_sad64x##h##_avg_avx2( \ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \ int ref_stride, const uint8_t *second_pred) { \ int i, res; \ @@ -126,7 +126,7 @@ FSAD32 } #define FSADAVG32_H(h) \ - unsigned int vpx_sad32x##h##_avg_avx2( \ + unsigned int aom_sad32x##h##_avg_avx2( \ const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, \ int ref_stride, const uint8_t *second_pred) { \ int i, res; \ diff --git a/aom_dsp/x86/sad_mmx.asm b/aom_dsp/x86/sad_mmx.asm index 9a64416bb91587e32570560f5e6bfa66fccfb398..02acd281b6be47a8da3519307a733144d74eb052 100644 --- a/aom_dsp/x86/sad_mmx.asm +++ b/aom_dsp/x86/sad_mmx.asm @@ -11,18 +11,18 @@ %include "aom_ports/x86_abi_support.asm" -global sym(vpx_sad16x16_mmx) PRIVATE -global sym(vpx_sad8x16_mmx) PRIVATE -global sym(vpx_sad8x8_mmx) PRIVATE -global sym(vpx_sad4x4_mmx) PRIVATE -global sym(vpx_sad16x8_mmx) PRIVATE +global sym(aom_sad16x16_mmx) PRIVATE +global sym(aom_sad8x16_mmx) PRIVATE +global sym(aom_sad8x8_mmx) PRIVATE +global sym(aom_sad4x4_mmx) PRIVATE +global sym(aom_sad16x8_mmx) PRIVATE -;unsigned int vpx_sad16x16_mmx( +;unsigned int aom_sad16x16_mmx( ; unsigned char *src_ptr, ; int src_stride, 
; unsigned char *ref_ptr, ; int ref_stride) -sym(vpx_sad16x16_mmx): +sym(aom_sad16x16_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 4 @@ -109,12 +109,12 @@ sym(vpx_sad16x16_mmx): ret -;unsigned int vpx_sad8x16_mmx( +;unsigned int aom_sad8x16_mmx( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride) -sym(vpx_sad8x16_mmx): +sym(aom_sad8x16_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 4 @@ -181,12 +181,12 @@ sym(vpx_sad8x16_mmx): ret -;unsigned int vpx_sad8x8_mmx( +;unsigned int aom_sad8x8_mmx( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride) -sym(vpx_sad8x8_mmx): +sym(aom_sad8x8_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 4 @@ -251,12 +251,12 @@ sym(vpx_sad8x8_mmx): ret -;unsigned int vpx_sad4x4_mmx( +;unsigned int aom_sad4x4_mmx( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride) -sym(vpx_sad4x4_mmx): +sym(aom_sad4x4_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 4 @@ -340,12 +340,12 @@ sym(vpx_sad4x4_mmx): ret -;unsigned int vpx_sad16x8_mmx( +;unsigned int aom_sad16x8_mmx( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride) -sym(vpx_sad16x8_mmx): +sym(aom_sad16x8_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 4 diff --git a/aom_dsp/x86/sad_sse2.asm b/aom_dsp/x86/sad_sse2.asm index 1ec906c236047794ccd9ceaf7ef7fb869226c2ba..c914c42724dcdeeaaad5fa2a970ad7aa5f3d8591 100644 --- a/aom_dsp/x86/sad_sse2.asm +++ b/aom_dsp/x86/sad_sse2.asm @@ -44,7 +44,7 @@ cglobal sad%1x%2_avg, 5, ARCH_X86_64 + %3, 6, src, src_stride, \ %endif ; %3 == 7 %endmacro -; unsigned int vpx_sad64x64_sse2(uint8_t *src, int src_stride, +; unsigned int aom_sad64x64_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro SAD64XN 1-2 0 SAD_FN 64, %1, 5, %2 @@ -87,7 +87,7 @@ SAD64XN 32 ; sad64x32_sse2 SAD64XN 64, 1 ; sad64x64_avg_sse2 SAD64XN 32, 1 ; sad64x32_avg_sse2 -; unsigned int vpx_sad32x32_sse2(uint8_t *src, int src_stride, +; unsigned int aom_sad32x32_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro SAD32XN 1-2 0 SAD_FN 32, %1, 5, %2 @@ -132,7 +132,7 @@ SAD32XN 64, 1 ; sad32x64_avg_sse2 SAD32XN 32, 1 ; sad32x32_avg_sse2 SAD32XN 16, 1 ; sad32x16_avg_sse2 -; unsigned int vpx_sad16x{8,16}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_sad16x{8,16}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro SAD16XN 1-2 0 SAD_FN 16, %1, 7, %2 @@ -178,7 +178,7 @@ SAD16XN 32, 1 ; sad16x32_avg_sse2 SAD16XN 16, 1 ; sad16x16_avg_sse2 SAD16XN 8, 1 ; sad16x8_avg_sse2 -; unsigned int vpx_sad8x{8,16}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_sad8x{8,16}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro SAD8XN 1-2 0 SAD_FN 8, %1, 7, %2 @@ -222,7 +222,7 @@ SAD8XN 16, 1 ; sad8x16_avg_sse2 SAD8XN 8, 1 ; sad8x8_avg_sse2 SAD8XN 4, 1 ; sad8x4_avg_sse2 -; unsigned int vpx_sad4x{4, 8}_sse2(uint8_t *src, int src_stride, +; unsigned int aom_sad4x{4, 8}_sse2(uint8_t *src, int src_stride, ; uint8_t *ref, int ref_stride); %macro SAD4XN 1-2 0 SAD_FN 4, %1, 7, %2 diff --git a/aom_dsp/x86/sad_sse3.asm b/aom_dsp/x86/sad_sse3.asm index 4665fb96d229b051822d96580d1fbfc17885d476..1de346ec458b32572e7a122b552a9029de12dd19 100644 --- a/aom_dsp/x86/sad_sse3.asm +++ b/aom_dsp/x86/sad_sse3.asm @@ -165,14 +165,14 @@ paddw mm7, mm3 %endmacro -;void int vpx_sad16x16x3_sse3( +;void int aom_sad16x16x3_sse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int 
*results) -global sym(vpx_sad16x16x3_sse3) PRIVATE -sym(vpx_sad16x16x3_sse3): +global sym(aom_sad16x16x3_sse3) PRIVATE +sym(aom_sad16x16x3_sse3): STACK_FRAME_CREATE_X3 @@ -207,14 +207,14 @@ sym(vpx_sad16x16x3_sse3): STACK_FRAME_DESTROY_X3 -;void int vpx_sad16x8x3_sse3( +;void int aom_sad16x8x3_sse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int *results) -global sym(vpx_sad16x8x3_sse3) PRIVATE -sym(vpx_sad16x8x3_sse3): +global sym(aom_sad16x8x3_sse3) PRIVATE +sym(aom_sad16x8x3_sse3): STACK_FRAME_CREATE_X3 @@ -245,14 +245,14 @@ sym(vpx_sad16x8x3_sse3): STACK_FRAME_DESTROY_X3 -;void int vpx_sad8x16x3_sse3( +;void int aom_sad8x16x3_sse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int *results) -global sym(vpx_sad8x16x3_sse3) PRIVATE -sym(vpx_sad8x16x3_sse3): +global sym(aom_sad8x16x3_sse3) PRIVATE +sym(aom_sad8x16x3_sse3): STACK_FRAME_CREATE_X3 @@ -274,14 +274,14 @@ sym(vpx_sad8x16x3_sse3): STACK_FRAME_DESTROY_X3 -;void int vpx_sad8x8x3_sse3( +;void int aom_sad8x8x3_sse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int *results) -global sym(vpx_sad8x8x3_sse3) PRIVATE -sym(vpx_sad8x8x3_sse3): +global sym(aom_sad8x8x3_sse3) PRIVATE +sym(aom_sad8x8x3_sse3): STACK_FRAME_CREATE_X3 @@ -299,14 +299,14 @@ sym(vpx_sad8x8x3_sse3): STACK_FRAME_DESTROY_X3 -;void int vpx_sad4x4x3_sse3( +;void int aom_sad4x4x3_sse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int *results) -global sym(vpx_sad4x4x3_sse3) PRIVATE -sym(vpx_sad4x4x3_sse3): +global sym(aom_sad4x4x3_sse3) PRIVATE +sym(aom_sad4x4x3_sse3): STACK_FRAME_CREATE_X3 diff --git a/aom_dsp/x86/sad_sse4.asm b/aom_dsp/x86/sad_sse4.asm index 07e28b48a2d9ad3ed50a113f5378799b54fb95a7..fd2c70b1fbaf751d627f1bca936b467c6c69624d 100644 --- a/aom_dsp/x86/sad_sse4.asm +++ b/aom_dsp/x86/sad_sse4.asm @@ -165,14 +165,14 @@ movdqa [rdi + 16], xmm2 %endmacro -;void vpx_sad16x16x8_sse4_1( +;void aom_sad16x16x8_sse4_1( ; const unsigned char *src_ptr, ; int src_stride, ; const unsigned char *ref_ptr, ; int ref_stride, ; unsigned short *sad_array); -global sym(vpx_sad16x16x8_sse4_1) PRIVATE -sym(vpx_sad16x16x8_sse4_1): +global sym(aom_sad16x16x8_sse4_1) PRIVATE +sym(aom_sad16x16x8_sse4_1): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -205,15 +205,15 @@ sym(vpx_sad16x16x8_sse4_1): ret -;void vpx_sad16x8x8_sse4_1( +;void aom_sad16x8x8_sse4_1( ; const unsigned char *src_ptr, ; int src_stride, ; const unsigned char *ref_ptr, ; int ref_stride, ; unsigned short *sad_array ;); -global sym(vpx_sad16x8x8_sse4_1) PRIVATE -sym(vpx_sad16x8x8_sse4_1): +global sym(aom_sad16x8x8_sse4_1) PRIVATE +sym(aom_sad16x8x8_sse4_1): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -242,15 +242,15 @@ sym(vpx_sad16x8x8_sse4_1): ret -;void vpx_sad8x8x8_sse4_1( +;void aom_sad8x8x8_sse4_1( ; const unsigned char *src_ptr, ; int src_stride, ; const unsigned char *ref_ptr, ; int ref_stride, ; unsigned short *sad_array ;); -global sym(vpx_sad8x8x8_sse4_1) PRIVATE -sym(vpx_sad8x8x8_sse4_1): +global sym(aom_sad8x8x8_sse4_1) PRIVATE +sym(aom_sad8x8x8_sse4_1): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -279,15 +279,15 @@ sym(vpx_sad8x8x8_sse4_1): ret -;void vpx_sad8x16x8_sse4_1( +;void aom_sad8x16x8_sse4_1( ; const unsigned char *src_ptr, ; int src_stride, ; const unsigned char *ref_ptr, ; int ref_stride, ; unsigned short *sad_array ;); -global sym(vpx_sad8x16x8_sse4_1) PRIVATE -sym(vpx_sad8x16x8_sse4_1): +global 
sym(aom_sad8x16x8_sse4_1) PRIVATE +sym(aom_sad8x16x8_sse4_1): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -320,15 +320,15 @@ sym(vpx_sad8x16x8_sse4_1): ret -;void vpx_sad4x4x8_sse4_1( +;void aom_sad4x4x8_sse4_1( ; const unsigned char *src_ptr, ; int src_stride, ; const unsigned char *ref_ptr, ; int ref_stride, ; unsigned short *sad_array ;); -global sym(vpx_sad4x4x8_sse4_1) PRIVATE -sym(vpx_sad4x4x8_sse4_1): +global sym(aom_sad4x4x8_sse4_1) PRIVATE +sym(aom_sad4x4x8_sse4_1): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 diff --git a/aom_dsp/x86/sad_ssse3.asm b/aom_dsp/x86/sad_ssse3.asm index 8315f97fec43b0e91dbce020e2d86ef0c071756f..b1c97ea87025fd5981e4ffa3ffaa91fbb68c8f8a 100644 --- a/aom_dsp/x86/sad_ssse3.asm +++ b/aom_dsp/x86/sad_ssse3.asm @@ -146,14 +146,14 @@ %endmacro -;void int vpx_sad16x16x3_ssse3( +;void int aom_sad16x16x3_ssse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int *results) -global sym(vpx_sad16x16x3_ssse3) PRIVATE -sym(vpx_sad16x16x3_ssse3): +global sym(aom_sad16x16x3_ssse3) PRIVATE +sym(aom_sad16x16x3_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -169,31 +169,31 @@ sym(vpx_sad16x16x3_ssse3): mov rdx, 0xf and rdx, rdi - jmp .vpx_sad16x16x3_ssse3_skiptable -.vpx_sad16x16x3_ssse3_jumptable: - dd .vpx_sad16x16x3_ssse3_aligned_by_0 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_1 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_2 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_3 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_4 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_5 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_6 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_7 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_8 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_9 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_10 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_11 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_12 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_13 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_14 - .vpx_sad16x16x3_ssse3_do_jump - dd .vpx_sad16x16x3_ssse3_aligned_by_15 - .vpx_sad16x16x3_ssse3_do_jump -.vpx_sad16x16x3_ssse3_skiptable: - - call .vpx_sad16x16x3_ssse3_do_jump -.vpx_sad16x16x3_ssse3_do_jump: + jmp .aom_sad16x16x3_ssse3_skiptable +.aom_sad16x16x3_ssse3_jumptable: + dd .aom_sad16x16x3_ssse3_aligned_by_0 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_1 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_2 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_3 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_4 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_5 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_6 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_7 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_8 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_9 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_10 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_11 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_12 - 
.aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_13 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_14 - .aom_sad16x16x3_ssse3_do_jump + dd .aom_sad16x16x3_ssse3_aligned_by_15 - .aom_sad16x16x3_ssse3_do_jump +.aom_sad16x16x3_ssse3_skiptable: + + call .aom_sad16x16x3_ssse3_do_jump +.aom_sad16x16x3_ssse3_do_jump: pop rcx ; get the address of do_jump - mov rax, .vpx_sad16x16x3_ssse3_jumptable - .vpx_sad16x16x3_ssse3_do_jump - add rax, rcx ; get the absolute address of vpx_sad16x16x3_ssse3_jumptable + mov rax, .aom_sad16x16x3_ssse3_jumptable - .aom_sad16x16x3_ssse3_do_jump + add rax, rcx ; get the absolute address of aom_sad16x16x3_ssse3_jumptable movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable add rcx, rax @@ -203,23 +203,23 @@ sym(vpx_sad16x16x3_ssse3): jmp rcx - PROCESS_16X16X3_OFFSET 0, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 1, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 2, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 3, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 4, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 5, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 6, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 7, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 8, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 9, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 10, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 11, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 12, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 13, .vpx_sad16x16x3_ssse3 - PROCESS_16X16X3_OFFSET 14, .vpx_sad16x16x3_ssse3 - -.vpx_sad16x16x3_ssse3_aligned_by_15: + PROCESS_16X16X3_OFFSET 0, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 1, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 2, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 3, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 4, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 5, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 6, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 7, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 8, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 9, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 10, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 11, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 12, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 13, .aom_sad16x16x3_ssse3 + PROCESS_16X16X3_OFFSET 14, .aom_sad16x16x3_ssse3 + +.aom_sad16x16x3_ssse3_aligned_by_15: PROCESS_16X2X3 1 PROCESS_16X2X3 0 PROCESS_16X2X3 0 @@ -229,7 +229,7 @@ sym(vpx_sad16x16x3_ssse3): PROCESS_16X2X3 0 PROCESS_16X2X3 0 -.vpx_sad16x16x3_ssse3_store_off: +.aom_sad16x16x3_ssse3_store_off: mov rdi, arg(4) ;Results movq xmm0, xmm5 @@ -259,14 +259,14 @@ sym(vpx_sad16x16x3_ssse3): pop rbp ret -;void int vpx_sad16x8x3_ssse3( +;void int aom_sad16x8x3_ssse3( ; unsigned char *src_ptr, ; int src_stride, ; unsigned char *ref_ptr, ; int ref_stride, ; int *results) -global sym(vpx_sad16x8x3_ssse3) PRIVATE -sym(vpx_sad16x8x3_ssse3): +global sym(aom_sad16x8x3_ssse3) PRIVATE +sym(aom_sad16x8x3_ssse3): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 5 @@ -282,31 +282,31 @@ sym(vpx_sad16x8x3_ssse3): mov rdx, 0xf and rdx, rdi - jmp .vpx_sad16x8x3_ssse3_skiptable -.vpx_sad16x8x3_ssse3_jumptable: - dd .vpx_sad16x8x3_ssse3_aligned_by_0 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_1 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_2 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_3 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_4 - 
.vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_5 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_6 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_7 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_8 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_9 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_10 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_11 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_12 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_13 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_14 - .vpx_sad16x8x3_ssse3_do_jump - dd .vpx_sad16x8x3_ssse3_aligned_by_15 - .vpx_sad16x8x3_ssse3_do_jump -.vpx_sad16x8x3_ssse3_skiptable: - - call .vpx_sad16x8x3_ssse3_do_jump -.vpx_sad16x8x3_ssse3_do_jump: + jmp .aom_sad16x8x3_ssse3_skiptable +.aom_sad16x8x3_ssse3_jumptable: + dd .aom_sad16x8x3_ssse3_aligned_by_0 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_1 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_2 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_3 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_4 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_5 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_6 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_7 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_8 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_9 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_10 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_11 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_12 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_13 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_14 - .aom_sad16x8x3_ssse3_do_jump + dd .aom_sad16x8x3_ssse3_aligned_by_15 - .aom_sad16x8x3_ssse3_do_jump +.aom_sad16x8x3_ssse3_skiptable: + + call .aom_sad16x8x3_ssse3_do_jump +.aom_sad16x8x3_ssse3_do_jump: pop rcx ; get the address of do_jump - mov rax, .vpx_sad16x8x3_ssse3_jumptable - .vpx_sad16x8x3_ssse3_do_jump - add rax, rcx ; get the absolute address of vpx_sad16x8x3_ssse3_jumptable + mov rax, .aom_sad16x8x3_ssse3_jumptable - .aom_sad16x8x3_ssse3_do_jump + add rax, rcx ; get the absolute address of aom_sad16x8x3_ssse3_jumptable movsxd rax, dword [rax + 4*rdx] ; get the 32 bit offset from the jumptable add rcx, rax @@ -316,30 +316,30 @@ sym(vpx_sad16x8x3_ssse3): jmp rcx - PROCESS_16X8X3_OFFSET 0, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 1, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 2, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 3, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 4, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 5, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 6, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 7, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 8, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 9, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 10, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 11, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 12, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 13, .vpx_sad16x8x3_ssse3 - PROCESS_16X8X3_OFFSET 14, .vpx_sad16x8x3_ssse3 - -.vpx_sad16x8x3_ssse3_aligned_by_15: + PROCESS_16X8X3_OFFSET 0, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 1, .aom_sad16x8x3_ssse3 + 
PROCESS_16X8X3_OFFSET 2, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 3, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 4, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 5, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 6, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 7, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 8, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 9, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 10, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 11, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 12, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 13, .aom_sad16x8x3_ssse3 + PROCESS_16X8X3_OFFSET 14, .aom_sad16x8x3_ssse3 + +.aom_sad16x8x3_ssse3_aligned_by_15: PROCESS_16X2X3 1 PROCESS_16X2X3 0 PROCESS_16X2X3 0 PROCESS_16X2X3 0 -.vpx_sad16x8x3_ssse3_store_off: +.aom_sad16x8x3_ssse3_store_off: mov rdi, arg(4) ;Results movq xmm0, xmm5 diff --git a/aom_dsp/x86/ssim_opt_x86_64.asm b/aom_dsp/x86/ssim_opt_x86_64.asm index fc49c30d530a40b41e991b21b24362f64a374fc8..ebc37031068cbedf57aa221be0c7de7dcd10315e 100644 --- a/aom_dsp/x86/ssim_opt_x86_64.asm +++ b/aom_dsp/x86/ssim_opt_x86_64.asm @@ -61,8 +61,8 @@ ; or pavgb At this point this is just meant to be first pass for calculating ; all the parms needed for 16x16 ssim so we can play with dssim as distortion ; in mode selection code. -global sym(vpx_ssim_parms_16x16_sse2) PRIVATE -sym(vpx_ssim_parms_16x16_sse2): +global sym(aom_ssim_parms_16x16_sse2) PRIVATE +sym(aom_ssim_parms_16x16_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 9 @@ -151,8 +151,8 @@ sym(vpx_ssim_parms_16x16_sse2): ; or pavgb At this point this is just meant to be first pass for calculating ; all the parms needed for 16x16 ssim so we can play with dssim as distortion ; in mode selection code. -global sym(vpx_ssim_parms_8x8_sse2) PRIVATE -sym(vpx_ssim_parms_8x8_sse2): +global sym(aom_ssim_parms_8x8_sse2) PRIVATE +sym(aom_ssim_parms_8x8_sse2): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 9 diff --git a/aom_dsp/x86/subpel_variance_sse2.asm b/aom_dsp/x86/subpel_variance_sse2.asm index c655e4b346923465dedac786376d504ebd7ed023..541d1b29a7368370cf9e903d0c5eece9acad8d76 100644 --- a/aom_dsp/x86/subpel_variance_sse2.asm +++ b/aom_dsp/x86/subpel_variance_sse2.asm @@ -39,7 +39,7 @@ bilin_filter_m_ssse3: times 8 db 16, 0 SECTION .text -; int vpx_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride, +; int aom_sub_pixel_varianceNxh(const uint8_t *src, ptrdiff_t src_stride, ; int x_offset, int y_offset, ; const uint8_t *dst, ptrdiff_t dst_stride, ; int height, unsigned int *sse); diff --git a/aom_dsp/x86/subtract_sse2.asm b/aom_dsp/x86/subtract_sse2.asm index 4273efb85465fb89cf3640d23a2c293b3d60eb83..f47cefee52a3b4cef8c8c1ab8fb5956062c9241e 100644 --- a/aom_dsp/x86/subtract_sse2.asm +++ b/aom_dsp/x86/subtract_sse2.asm @@ -12,7 +12,7 @@ SECTION .text -; void vpx_subtract_block(int rows, int cols, +; void aom_subtract_block(int rows, int cols, ; int16_t *diff, ptrdiff_t diff_stride, ; const uint8_t *src, ptrdiff_t src_stride, ; const uint8_t *pred, ptrdiff_t pred_stride) diff --git a/aom_dsp/x86/txfm_common_sse2.h b/aom_dsp/x86/txfm_common_sse2.h index bcaa3443ee6bd4a115bb7bcfd09c7898eb8f37b9..0a6a2272ddbb23b8240dde9b4ffe171e91656a7a 100644 --- a/aom_dsp/x86/txfm_common_sse2.h +++ b/aom_dsp/x86/txfm_common_sse2.h @@ -13,7 +13,7 @@ #define VPX_DSP_X86_TXFM_COMMON_SSE2_H_ #include -#include "aom/vpx_integer.h" +#include "aom/aom_integer.h" #define pair_set_epi16(a, b) \ _mm_set_epi16((int16_t)(b), (int16_t)(a), (int16_t)(b), (int16_t)(a), \ diff --git a/aom_dsp/x86/variance_avx2.c 
b/aom_dsp/x86/variance_avx2.c index ffe4ae55d13908225c8d39f27907d04419325508..a02ebd08f5af1c2d06e3551706cfc8e710dec3cf 100644 --- a/aom_dsp/x86/variance_avx2.c +++ b/aom_dsp/x86/variance_avx2.c @@ -8,13 +8,13 @@ * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" typedef void (*get_var_avx2)(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse, int *sum); -void vpx_get32x32var_avx2(const uint8_t *src, int src_stride, +void aom_get32x32var_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse, int *sum); @@ -39,104 +39,104 @@ static void variance_avx2(const uint8_t *src, int src_stride, } } -unsigned int vpx_variance16x16_avx2(const uint8_t *src, int src_stride, +unsigned int aom_variance16x16_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_avx2(src, src_stride, ref, ref_stride, 16, 16, sse, &sum, - vpx_get16x16var_avx2, 16); + aom_get16x16var_avx2, 16); return *sse - (((unsigned int)sum * sum) >> 8); } -unsigned int vpx_mse16x16_avx2(const uint8_t *src, int src_stride, +unsigned int aom_mse16x16_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; - vpx_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum); + aom_get16x16var_avx2(src, src_stride, ref, ref_stride, sse, &sum); return *sse; } -unsigned int vpx_variance32x16_avx2(const uint8_t *src, int src_stride, +unsigned int aom_variance32x16_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_avx2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum, - vpx_get32x32var_avx2, 32); + aom_get32x32var_avx2, 32); return *sse - (((int64_t)sum * sum) >> 9); } -unsigned int vpx_variance32x32_avx2(const uint8_t *src, int src_stride, +unsigned int aom_variance32x32_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_avx2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum, - vpx_get32x32var_avx2, 32); + aom_get32x32var_avx2, 32); return *sse - (((int64_t)sum * sum) >> 10); } -unsigned int vpx_variance64x64_avx2(const uint8_t *src, int src_stride, +unsigned int aom_variance64x64_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_avx2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum, - vpx_get32x32var_avx2, 32); + aom_get32x32var_avx2, 32); return *sse - (((int64_t)sum * sum) >> 12); } -unsigned int vpx_variance64x32_avx2(const uint8_t *src, int src_stride, +unsigned int aom_variance64x32_avx2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_avx2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum, - vpx_get32x32var_avx2, 32); + aom_get32x32var_avx2, 32); return *sse - (((int64_t)sum * sum) >> 11); } -unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, +unsigned int aom_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, int height, unsigned int *sse); -unsigned int vpx_sub_pixel_avg_variance32xh_avx2( +unsigned int aom_sub_pixel_avg_variance32xh_avx2( const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int 
dst_stride, const uint8_t *sec, int sec_stride, int height, unsigned int *sseptr); -unsigned int vpx_sub_pixel_variance64x64_avx2(const uint8_t *src, +unsigned int aom_sub_pixel_variance64x64_avx2(const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, unsigned int *sse) { unsigned int sse1; - const int se1 = vpx_sub_pixel_variance32xh_avx2( + const int se1 = aom_sub_pixel_variance32xh_avx2( src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1); unsigned int sse2; const int se2 = - vpx_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset, + aom_sub_pixel_variance32xh_avx2(src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, 64, &sse2); const int se = se1 + se2; *sse = sse1 + sse2; return *sse - (((int64_t)se * se) >> 12); } -unsigned int vpx_sub_pixel_variance32x32_avx2(const uint8_t *src, +unsigned int aom_sub_pixel_variance32x32_avx2(const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, unsigned int *sse) { - const int se = vpx_sub_pixel_variance32xh_avx2( + const int se = aom_sub_pixel_variance32xh_avx2( src, src_stride, x_offset, y_offset, dst, dst_stride, 32, sse); return *sse - (((int64_t)se * se) >> 10); } -unsigned int vpx_sub_pixel_avg_variance64x64_avx2( +unsigned int aom_sub_pixel_avg_variance64x64_avx2( const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) { unsigned int sse1; - const int se1 = vpx_sub_pixel_avg_variance32xh_avx2( + const int se1 = aom_sub_pixel_avg_variance32xh_avx2( src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1); unsigned int sse2; - const int se2 = vpx_sub_pixel_avg_variance32xh_avx2( + const int se2 = aom_sub_pixel_avg_variance32xh_avx2( src + 32, src_stride, x_offset, y_offset, dst + 32, dst_stride, sec + 32, 64, 64, &sse2); const int se = se1 + se2; @@ -146,11 +146,11 @@ unsigned int vpx_sub_pixel_avg_variance64x64_avx2( return *sse - (((int64_t)se * se) >> 12); } -unsigned int vpx_sub_pixel_avg_variance32x32_avx2( +unsigned int aom_sub_pixel_avg_variance32x32_avx2( const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, unsigned int *sse, const uint8_t *sec) { // Process 32 elements in parallel. 
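  // Explanatory aside (a reading of the surrounding code, not part of the
  // original patch): the 32xh helper returns the signed sum of pixel
  // differences (se) and writes the sum of squared differences through *sse,
  // and every wrapper in this file then applies
  //
  //   variance = SSE - (se * se) / N,   with N = block area
  //
  // so this 32x32 wrapper shifts by 10 (N = 1024) in the line below, while
  // the 64x64 wrappers above first sum two 32-pixel-wide halves and then
  // shift by 12 (N = 4096).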
- const int se = vpx_sub_pixel_avg_variance32xh_avx2( + const int se = aom_sub_pixel_avg_variance32xh_avx2( src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 32, 32, sse); return *sse - (((int64_t)se * se) >> 10); } diff --git a/aom_dsp/x86/variance_impl_avx2.c b/aom_dsp/x86/variance_impl_avx2.c index af5b93cf3a2b8d950d23a9662fbea803139caa09..e925df7aff890999ccfb3d085e02f9c3d258ea73 100644 --- a/aom_dsp/x86/variance_impl_avx2.c +++ b/aom_dsp/x86/variance_impl_avx2.c @@ -11,7 +11,7 @@ #include // AVX2 -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = { @@ -31,7 +31,7 @@ DECLARE_ALIGNED(32, static const uint8_t, bilinear_filters_avx2[512]) = { 14, 2, 14, 2, 14, 2, 14, 2, 14, }; -void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride, +void aom_get16x16var_avx2(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum) { __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low; @@ -137,7 +137,7 @@ void vpx_get16x16var_avx2(const unsigned char *src_ptr, int source_stride, } } -void vpx_get32x32var_avx2(const unsigned char *src_ptr, int source_stride, +void aom_get32x32var_avx2(const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int recon_stride, unsigned int *SSE, int *Sum) { __m256i src, src_expand_low, src_expand_high, ref, ref_expand_low; @@ -294,7 +294,7 @@ void vpx_get32x32var_avx2(const unsigned char *src_ptr, int source_stride, sum = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_reg)) + \ _mm_cvtsi128_si32(_mm256_extractf128_si256(sum_reg, 1)); -unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, +unsigned int aom_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, int height, unsigned int *sse) { @@ -481,7 +481,7 @@ unsigned int vpx_sub_pixel_variance32xh_avx2(const uint8_t *src, int src_stride, return sum; } -unsigned int vpx_sub_pixel_avg_variance32xh_avx2( +unsigned int aom_sub_pixel_avg_variance32xh_avx2( const uint8_t *src, int src_stride, int x_offset, int y_offset, const uint8_t *dst, int dst_stride, const uint8_t *sec, int sec_stride, int height, unsigned int *sse) { diff --git a/aom_dsp/x86/variance_impl_mmx.asm b/aom_dsp/x86/variance_impl_mmx.asm index 5500d975b9524132c80e2433ca3fde684d286f59..7c79448fcc47ef366072aef6a7386002c3c5f111 100644 --- a/aom_dsp/x86/variance_impl_mmx.asm +++ b/aom_dsp/x86/variance_impl_mmx.asm @@ -13,9 +13,9 @@ %define mmx_filter_shift 7 -;unsigned int vpx_get_mb_ss_mmx( short *src_ptr ) -global sym(vpx_get_mb_ss_mmx) PRIVATE -sym(vpx_get_mb_ss_mmx): +;unsigned int aom_get_mb_ss_mmx( short *src_ptr ) +global sym(aom_get_mb_ss_mmx) PRIVATE +sym(aom_get_mb_ss_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 7 @@ -63,7 +63,7 @@ sym(vpx_get_mb_ss_mmx): pop rbp ret -;void vpx_get8x8var_mmx +;void aom_get8x8var_mmx ;( ; unsigned char *src_ptr, ; int source_stride, @@ -72,8 +72,8 @@ sym(vpx_get_mb_ss_mmx): ; unsigned int *SSE, ; int *Sum ;) -global sym(vpx_get8x8var_mmx) PRIVATE -sym(vpx_get8x8var_mmx): +global sym(aom_get8x8var_mmx) PRIVATE +sym(aom_get8x8var_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -306,7 +306,7 @@ sym(vpx_get8x8var_mmx): ret ;void -;vpx_get4x4var_mmx +;aom_get4x4var_mmx ;( ; unsigned char *src_ptr, ; int source_stride, @@ -315,8 +315,8 @@ sym(vpx_get8x8var_mmx): ; unsigned int *SSE, ; int 
*Sum ;) -global sym(vpx_get4x4var_mmx) PRIVATE -sym(vpx_get4x4var_mmx): +global sym(aom_get4x4var_mmx) PRIVATE +sym(aom_get4x4var_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 6 @@ -414,7 +414,7 @@ sym(vpx_get4x4var_mmx): pop rbp ret -;void vpx_filter_block2d_bil4x4_var_mmx +;void aom_filter_block2d_bil4x4_var_mmx ;( ; unsigned char *ref_ptr, ; int ref_pixels_per_line, @@ -425,8 +425,8 @@ sym(vpx_get4x4var_mmx): ; int *sum, ; unsigned int *sumsquared ;) -global sym(vpx_filter_block2d_bil4x4_var_mmx) PRIVATE -sym(vpx_filter_block2d_bil4x4_var_mmx): +global sym(aom_filter_block2d_bil4x4_var_mmx) PRIVATE +sym(aom_filter_block2d_bil4x4_var_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 8 @@ -550,7 +550,7 @@ sym(vpx_filter_block2d_bil4x4_var_mmx): pop rbp ret -;void vpx_filter_block2d_bil_var_mmx +;void aom_filter_block2d_bil_var_mmx ;( ; unsigned char *ref_ptr, ; int ref_pixels_per_line, @@ -562,8 +562,8 @@ sym(vpx_filter_block2d_bil4x4_var_mmx): ; int *sum, ; unsigned int *sumsquared ;) -global sym(vpx_filter_block2d_bil_var_mmx) PRIVATE -sym(vpx_filter_block2d_bil_var_mmx): +global sym(aom_filter_block2d_bil_var_mmx) PRIVATE +sym(aom_filter_block2d_bil_var_mmx): push rbp mov rbp, rsp SHADOW_ARGS_TO_STACK 9 diff --git a/aom_dsp/x86/variance_mmx.c b/aom_dsp/x86/variance_mmx.c index a168a7e06ea0ba174c447453e6cb2b801442a985..da67fa5fdb83bd81813dc5c244d442472dbe0bcb 100644 --- a/aom_dsp/x86/variance_mmx.c +++ b/aom_dsp/x86/variance_mmx.c @@ -9,7 +9,7 @@ * PATENTS file, you can obtain it at www.aomedia.org/license/patent. */ -#include "./vpx_dsp_rtcd.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" @@ -20,10 +20,10 @@ DECLARE_ALIGNED(16, static const int16_t, bilinear_filters_mmx[8][8]) = { { 32, 32, 32, 32, 96, 96, 96, 96 }, { 16, 16, 16, 16, 112, 112, 112, 112 } }; -extern void vpx_get4x4var_mmx(const uint8_t *a, int a_stride, const uint8_t *b, +extern void aom_get4x4var_mmx(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, unsigned int *sse, int *sum); -extern void vpx_filter_block2d_bil4x4_var_mmx(const unsigned char *ref_ptr, +extern void aom_filter_block2d_bil4x4_var_mmx(const unsigned char *ref_ptr, int ref_pixels_per_line, const unsigned char *src_ptr, int src_pixels_per_line, @@ -31,46 +31,46 @@ extern void vpx_filter_block2d_bil4x4_var_mmx(const unsigned char *ref_ptr, const int16_t *VFilter, int *sum, unsigned int *sumsquared); -extern void vpx_filter_block2d_bil_var_mmx( +extern void aom_filter_block2d_bil_var_mmx( const unsigned char *ref_ptr, int ref_pixels_per_line, const unsigned char *src_ptr, int src_pixels_per_line, unsigned int Height, const int16_t *HFilter, const int16_t *VFilter, int *sum, unsigned int *sumsquared); -unsigned int vpx_variance4x4_mmx(const unsigned char *a, int a_stride, +unsigned int aom_variance4x4_mmx(const unsigned char *a, int a_stride, const unsigned char *b, int b_stride, unsigned int *sse) { unsigned int var; int avg; - vpx_get4x4var_mmx(a, a_stride, b, b_stride, &var, &avg); + aom_get4x4var_mmx(a, a_stride, b, b_stride, &var, &avg); *sse = var; return (var - (((unsigned int)avg * avg) >> 4)); } -unsigned int vpx_variance8x8_mmx(const unsigned char *a, int a_stride, +unsigned int aom_variance8x8_mmx(const unsigned char *a, int a_stride, const unsigned char *b, int b_stride, unsigned int *sse) { unsigned int var; int avg; - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &var, &avg); + aom_get8x8var_mmx(a, a_stride, b, b_stride, &var, &avg); *sse = var; return (var - (((unsigned int)avg * avg) >> 6)); } -unsigned int 
vpx_mse16x16_mmx(const unsigned char *a, int a_stride, +unsigned int aom_mse16x16_mmx(const unsigned char *a, int a_stride, const unsigned char *b, int b_stride, unsigned int *sse) { unsigned int sse0, sse1, sse2, sse3, var; int sum0, sum1, sum2, sum3; - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); - vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride, + aom_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); + aom_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); + aom_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride, &sse2, &sum2); - vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, b + 8 * b_stride + 8, + aom_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, b + 8 * b_stride + 8, b_stride, &sse3, &sum3); var = sse0 + sse1 + sse2 + sse3; @@ -78,17 +78,17 @@ unsigned int vpx_mse16x16_mmx(const unsigned char *a, int a_stride, return var; } -unsigned int vpx_variance16x16_mmx(const unsigned char *a, int a_stride, +unsigned int aom_variance16x16_mmx(const unsigned char *a, int a_stride, const unsigned char *b, int b_stride, unsigned int *sse) { unsigned int sse0, sse1, sse2, sse3, var; int sum0, sum1, sum2, sum3, avg; - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); - vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride, + aom_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); + aom_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); + aom_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride, &sse2, &sum2); - vpx_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, b + 8 * b_stride + 8, + aom_get8x8var_mmx(a + 8 * a_stride + 8, a_stride, b + 8 * b_stride + 8, b_stride, &sse3, &sum3); var = sse0 + sse1 + sse2 + sse3; @@ -97,14 +97,14 @@ unsigned int vpx_variance16x16_mmx(const unsigned char *a, int a_stride, return (var - (((unsigned int)avg * avg) >> 8)); } -unsigned int vpx_variance16x8_mmx(const unsigned char *a, int a_stride, +unsigned int aom_variance16x8_mmx(const unsigned char *a, int a_stride, const unsigned char *b, int b_stride, unsigned int *sse) { unsigned int sse0, sse1, var; int sum0, sum1, avg; - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); + aom_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); + aom_get8x8var_mmx(a + 8, a_stride, b + 8, b_stride, &sse1, &sum1); var = sse0 + sse1; avg = sum0 + sum1; @@ -112,14 +112,14 @@ unsigned int vpx_variance16x8_mmx(const unsigned char *a, int a_stride, return (var - (((unsigned int)avg * avg) >> 7)); } -unsigned int vpx_variance8x16_mmx(const unsigned char *a, int a_stride, +unsigned int aom_variance8x16_mmx(const unsigned char *a, int a_stride, const unsigned char *b, int b_stride, unsigned int *sse) { unsigned int sse0, sse1, var; int sum0, sum1, avg; - vpx_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); - vpx_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride, + aom_get8x8var_mmx(a, a_stride, b, b_stride, &sse0, &sum0); + aom_get8x8var_mmx(a + 8 * a_stride, a_stride, b + 8 * b_stride, b_stride, &sse1, &sum1); var = sse0 + sse1; @@ -129,44 +129,44 @@ unsigned int vpx_variance8x16_mmx(const unsigned char *a, int a_stride, return (var - (((unsigned int)avg * avg) >> 7)); } -uint32_t vpx_sub_pixel_variance4x4_mmx(const uint8_t *a, int a_stride, +uint32_t 
aom_sub_pixel_variance4x4_mmx(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, uint32_t *sse) { int xsum; unsigned int xxsum; - vpx_filter_block2d_bil4x4_var_mmx( + aom_filter_block2d_bil4x4_var_mmx( a, a_stride, b, b_stride, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum, &xxsum); *sse = xxsum; return (xxsum - (((unsigned int)xsum * xsum) >> 4)); } -uint32_t vpx_sub_pixel_variance8x8_mmx(const uint8_t *a, int a_stride, +uint32_t aom_sub_pixel_variance8x8_mmx(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, uint32_t *sse) { int xsum; uint32_t xxsum; - vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8, + aom_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 8, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum, &xxsum); *sse = xxsum; return (xxsum - (((uint32_t)xsum * xsum) >> 6)); } -uint32_t vpx_sub_pixel_variance16x16_mmx(const uint8_t *a, int a_stride, +uint32_t aom_sub_pixel_variance16x16_mmx(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, uint32_t *sse) { int xsum0, xsum1; unsigned int xxsum0, xxsum1; - vpx_filter_block2d_bil_var_mmx( + aom_filter_block2d_bil_var_mmx( a, a_stride, b, b_stride, 16, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum0, &xxsum0); - vpx_filter_block2d_bil_var_mmx( + aom_filter_block2d_bil_var_mmx( a + 8, a_stride, b + 8, b_stride, 16, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum1, &xxsum1); @@ -177,18 +177,18 @@ uint32_t vpx_sub_pixel_variance16x16_mmx(const uint8_t *a, int a_stride, return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 8)); } -uint32_t vpx_sub_pixel_variance16x8_mmx(const uint8_t *a, int a_stride, +uint32_t aom_sub_pixel_variance16x8_mmx(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, uint32_t *sse) { int xsum0, xsum1; unsigned int xxsum0, xxsum1; - vpx_filter_block2d_bil_var_mmx( + aom_filter_block2d_bil_var_mmx( a, a_stride, b, b_stride, 8, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum0, &xxsum0); - vpx_filter_block2d_bil_var_mmx( + aom_filter_block2d_bil_var_mmx( a + 8, a_stride, b + 8, b_stride, 8, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum1, &xxsum1); @@ -199,33 +199,33 @@ uint32_t vpx_sub_pixel_variance16x8_mmx(const uint8_t *a, int a_stride, return (xxsum0 - (((uint32_t)xsum0 * xsum0) >> 7)); } -uint32_t vpx_sub_pixel_variance8x16_mmx(const uint8_t *a, int a_stride, +uint32_t aom_sub_pixel_variance8x16_mmx(const uint8_t *a, int a_stride, int xoffset, int yoffset, const uint8_t *b, int b_stride, uint32_t *sse) { int xsum; unsigned int xxsum; - vpx_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16, + aom_filter_block2d_bil_var_mmx(a, a_stride, b, b_stride, 16, bilinear_filters_mmx[xoffset], bilinear_filters_mmx[yoffset], &xsum, &xxsum); *sse = xxsum; return (xxsum - (((uint32_t)xsum * xsum) >> 7)); } -uint32_t vpx_variance_halfpixvar16x16_h_mmx(const uint8_t *a, int a_stride, +uint32_t aom_variance_halfpixvar16x16_h_mmx(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, uint32_t *sse) { - return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 4, 0, b, b_stride, sse); + return aom_sub_pixel_variance16x16_mmx(a, a_stride, 4, 0, b, b_stride, sse); } -uint32_t vpx_variance_halfpixvar16x16_v_mmx(const uint8_t *a, int a_stride, +uint32_t aom_variance_halfpixvar16x16_v_mmx(const uint8_t *a, int a_stride, const uint8_t *b, int 
b_stride, uint32_t *sse) { - return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 0, 4, b, b_stride, sse); + return aom_sub_pixel_variance16x16_mmx(a, a_stride, 0, 4, b, b_stride, sse); } -uint32_t vpx_variance_halfpixvar16x16_hv_mmx(const uint8_t *a, int a_stride, +uint32_t aom_variance_halfpixvar16x16_hv_mmx(const uint8_t *a, int a_stride, const uint8_t *b, int b_stride, uint32_t *sse) { - return vpx_sub_pixel_variance16x16_mmx(a, a_stride, 4, 4, b, b_stride, sse); + return aom_sub_pixel_variance16x16_mmx(a, a_stride, 4, 4, b, b_stride, sse); } diff --git a/aom_dsp/x86/variance_sse2.c b/aom_dsp/x86/variance_sse2.c index 1e7f0fb9ca66385d691e1e1c587e026cb323ee1d..823e016caa109f39dfc0324f2715c7a2cf660f9b 100644 --- a/aom_dsp/x86/variance_sse2.c +++ b/aom_dsp/x86/variance_sse2.c @@ -11,8 +11,8 @@ #include // SSE2 -#include "./vpx_config.h" -#include "./vpx_dsp_rtcd.h" +#include "./aom_config.h" +#include "./aom_dsp_rtcd.h" #include "aom_ports/mem.h" @@ -20,7 +20,7 @@ typedef void (*getNxMvar_fn_t)(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse, int *sum); -unsigned int vpx_get_mb_ss_sse2(const int16_t *src) { +unsigned int aom_get_mb_ss_sse2(const int16_t *src) { __m128i vsum = _mm_setzero_si128(); int i; @@ -66,7 +66,7 @@ static void get4x4var_sse2(const uint8_t *src, int src_stride, *sse = _mm_cvtsi128_si32(vsum); } -void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, +void aom_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse, int *sum) { const __m128i zero = _mm_setzero_si128(); __m128i vsum = _mm_setzero_si128(); @@ -104,7 +104,7 @@ void vpx_get8x8var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, *sse = _mm_cvtsi128_si32(vsse); } -void vpx_get16x16var_sse2(const uint8_t *src, int src_stride, +void aom_get16x16var_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse, int *sum) { const __m128i zero = _mm_setzero_si128(); @@ -166,7 +166,7 @@ static void variance_sse2(const unsigned char *src, int src_stride, } } -unsigned int vpx_variance4x4_sse2(const unsigned char *src, int src_stride, +unsigned int aom_variance4x4_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; @@ -174,7 +174,7 @@ unsigned int vpx_variance4x4_sse2(const unsigned char *src, int src_stride, return *sse - (((unsigned int)sum * sum) >> 4); } -unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance8x4_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; @@ -183,7 +183,7 @@ unsigned int vpx_variance8x4_sse2(const uint8_t *src, int src_stride, return *sse - (((unsigned int)sum * sum) >> 5); } -unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance4x8_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; @@ -192,119 +192,119 @@ unsigned int vpx_variance4x8_sse2(const uint8_t *src, int src_stride, return *sse - (((unsigned int)sum * sum) >> 5); } -unsigned int vpx_variance8x8_sse2(const unsigned char *src, int src_stride, +unsigned int aom_variance8x8_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; - vpx_get8x8var_sse2(src, src_stride, ref, ref_stride, sse, &sum); + aom_get8x8var_sse2(src, src_stride, ref, 
ref_stride, sse, &sum); return *sse - (((unsigned int)sum * sum) >> 6); } -unsigned int vpx_variance16x8_sse2(const unsigned char *src, int src_stride, +unsigned int aom_variance16x8_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 16, 8, sse, &sum, - vpx_get8x8var_sse2, 8); + aom_get8x8var_sse2, 8); return *sse - (((unsigned int)sum * sum) >> 7); } -unsigned int vpx_variance8x16_sse2(const unsigned char *src, int src_stride, +unsigned int aom_variance8x16_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 8, 16, sse, &sum, - vpx_get8x8var_sse2, 8); + aom_get8x8var_sse2, 8); return *sse - (((unsigned int)sum * sum) >> 7); } -unsigned int vpx_variance16x16_sse2(const unsigned char *src, int src_stride, +unsigned int aom_variance16x16_sse2(const unsigned char *src, int src_stride, const unsigned char *ref, int ref_stride, unsigned int *sse) { int sum; - vpx_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum); + aom_get16x16var_sse2(src, src_stride, ref, ref_stride, sse, &sum); return *sse - (((unsigned int)sum * sum) >> 8); } -unsigned int vpx_variance32x32_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance32x32_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 32, 32, sse, &sum, - vpx_get16x16var_sse2, 16); + aom_get16x16var_sse2, 16); return *sse - (((int64_t)sum * sum) >> 10); } -unsigned int vpx_variance32x16_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance32x16_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 32, 16, sse, &sum, - vpx_get16x16var_sse2, 16); + aom_get16x16var_sse2, 16); return *sse - (((int64_t)sum * sum) >> 9); } -unsigned int vpx_variance16x32_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance16x32_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 16, 32, sse, &sum, - vpx_get16x16var_sse2, 16); + aom_get16x16var_sse2, 16); return *sse - (((int64_t)sum * sum) >> 9); } -unsigned int vpx_variance64x64_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance64x64_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 64, 64, sse, &sum, - vpx_get16x16var_sse2, 16); + aom_get16x16var_sse2, 16); return *sse - (((int64_t)sum * sum) >> 12); } -unsigned int vpx_variance64x32_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance64x32_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 64, 32, sse, &sum, - vpx_get16x16var_sse2, 16); + aom_get16x16var_sse2, 16); return *sse - (((int64_t)sum * sum) >> 11); } -unsigned int vpx_variance32x64_sse2(const uint8_t *src, int src_stride, +unsigned int aom_variance32x64_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { int sum; variance_sse2(src, src_stride, ref, ref_stride, 32, 64, sse, &sum, - vpx_get16x16var_sse2, 16); + aom_get16x16var_sse2, 16); return *sse - 
(((int64_t)sum * sum) >> 11); } -unsigned int vpx_mse8x8_sse2(const uint8_t *src, int src_stride, +unsigned int aom_mse8x8_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { - vpx_variance8x8_sse2(src, src_stride, ref, ref_stride, sse); + aom_variance8x8_sse2(src, src_stride, ref, ref_stride, sse); return *sse; } -unsigned int vpx_mse8x16_sse2(const uint8_t *src, int src_stride, +unsigned int aom_mse8x16_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { - vpx_variance8x16_sse2(src, src_stride, ref, ref_stride, sse); + aom_variance8x16_sse2(src, src_stride, ref, ref_stride, sse); return *sse; } -unsigned int vpx_mse16x8_sse2(const uint8_t *src, int src_stride, +unsigned int aom_mse16x8_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { - vpx_variance16x8_sse2(src, src_stride, ref, ref_stride, sse); + aom_variance16x8_sse2(src, src_stride, ref, ref_stride, sse); return *sse; } -unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride, +unsigned int aom_mse16x16_sse2(const uint8_t *src, int src_stride, const uint8_t *ref, int ref_stride, unsigned int *sse) { - vpx_variance16x16_sse2(src, src_stride, ref, ref_stride, sse); + aom_variance16x16_sse2(src, src_stride, ref, ref_stride, sse); return *sse; } @@ -312,7 +312,7 @@ unsigned int vpx_mse16x16_sse2(const uint8_t *src, int src_stride, // The 2 unused parameters are place holders