diff --git a/aom/aom_codec.mk b/aom/aom_codec.mk
index 8ac60334fec2212c6b3882be1fd9ac96f6458cd8..4f385c6ce8b998d0018ad0be3c52773f63f44b25 100644
--- a/aom/aom_codec.mk
+++ b/aom/aom_codec.mk
@@ -12,14 +12,14 @@
 API_EXPORTS += exports
 
 API_SRCS-$(CONFIG_VP10_ENCODER) += vp8.h
-API_SRCS-$(CONFIG_VP10_ENCODER) += vp8cx.h
-API_DOC_SRCS-$(CONFIG_VP10_ENCODER) += vp8.h
-API_DOC_SRCS-$(CONFIG_VP10_ENCODER) += vp8cx.h
+API_SRCS-$(CONFIG_AV1_ENCODER) += vp8cx.h
+API_DOC_SRCS-$(CONFIG_AV1_ENCODER) += vp8.h
+API_DOC_SRCS-$(CONFIG_AV1_ENCODER) += vp8cx.h
 
-API_SRCS-$(CONFIG_VP10_DECODER) += vp8.h
-API_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h
-API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8.h
-API_DOC_SRCS-$(CONFIG_VP10_DECODER) += vp8dx.h
+API_SRCS-$(CONFIG_AV1_DECODER) += vp8.h
+API_SRCS-$(CONFIG_AV1_DECODER) += vp8dx.h
+API_DOC_SRCS-$(CONFIG_AV1_DECODER) += vp8.h
+API_DOC_SRCS-$(CONFIG_AV1_DECODER) += vp8dx.h
 
 API_DOC_SRCS-yes += aom_codec.h
 API_DOC_SRCS-yes += aom_decoder.h
diff --git a/aom/vp8cx.h b/aom/vp8cx.h
index 882d084377547804541c32bde1737ae38d6acc0b..e3cd33100ce2ba0837a0c96c2b1695d47f04e507 100644
--- a/aom/vp8cx.h
+++ b/aom/vp8cx.h
@@ -28,13 +28,13 @@
 extern "C" {
 #endif
 
-/*!\name Algorithm interface for VP10
+/*!\name Algorithm interface for AV1
  *
- * This interface provides the capability to encode raw VP10 streams.
+ * This interface provides the capability to encode raw AV1 streams.
  * @{
  */
-extern aom_codec_iface_t aom_codec_vp10_cx_algo;
-extern aom_codec_iface_t *aom_codec_vp10_cx(void);
+extern aom_codec_iface_t aom_codec_av1_cx_algo;
+extern aom_codec_iface_t *aom_codec_av1_cx(void);
 /*!@} - end algorithm interface member group*/
 
 /*
diff --git a/aom/vp8dx.h b/aom/vp8dx.h
index 307d217a25975e177b89cd1aa24952725bf233ac..9efbc8cdb2974cf50a60f734c5f51ae904d32ff7 100644
--- a/aom/vp8dx.h
+++ b/aom/vp8dx.h
@@ -28,13 +28,13 @@ extern "C" {
 /* Include controls common to both the encoder and decoder */
 #include "./vp8.h"
 
-/*!\name Algorithm interface for VP10
+/*!\name Algorithm interface for AV1
  *
- * This interface provides the capability to decode VP10 streams.
+ * This interface provides the capability to decode AV1 streams.
  * @{
  */
-extern aom_codec_iface_t aom_codec_vp10_dx_algo;
-extern aom_codec_iface_t *aom_codec_vp10_dx(void);
+extern aom_codec_iface_t aom_codec_av1_dx_algo;
+extern aom_codec_iface_t *aom_codec_av1_dx(void);
 /*!@} - end algorithm interface member group*/
 
 /*!\enum vp8_dec_control_id
diff --git a/aom_dsp/aom_convolve.c b/aom_dsp/aom_convolve.c
index 233e8502073a6d46d5c27f84573063519bb004f7..d74957b78c8a634241ad7d68f8e35b208fd00110 100644
--- a/aom_dsp/aom_convolve.c
+++ b/aom_dsp/aom_convolve.c
@@ -329,7 +329,7 @@ void aom_scaled_avg_2d_c(const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,
                       filter_y, y_step_q4, w, h);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_convolve_horiz(const uint8_t *src8, ptrdiff_t src_stride,
                                   uint8_t *dst8, ptrdiff_t dst_stride,
                                   const InterpKernel *x_filters, int x0_q4,
diff --git a/aom_dsp/aom_convolve.h b/aom_dsp/aom_convolve.h
index 1fc4af4f6ff3c0ccde5d2cf3b35c86d93616f1de..6f17c4a285ceae35c8da0525ea3fe3a831b94208 100644
--- a/aom_dsp/aom_convolve.h
+++ b/aom_dsp/aom_convolve.h
@@ -24,7 +24,7 @@ typedef void (*convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                               const int16_t *filter_y, int y_step_q4, int w,
                               int h);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*highbd_convolve_fn_t)(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const int16_t *filter_x, int x_step_q4,
diff --git a/aom_dsp/aom_dsp.mk b/aom_dsp/aom_dsp.mk
index fcc04c2a062bf7aec0f440962a7b40bea434aa66..2aea32cdd58541f942496b0c5055ef69d95f9656 100644
--- a/aom_dsp/aom_dsp.mk
+++ b/aom_dsp/aom_dsp.mk
@@ -45,12 +45,12 @@ DSP_SRCS-$(HAVE_SSSE3) += x86/intrapred_ssse3.asm
 DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
 endif  # CONFIG_USE_X86INC
 
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 ifeq ($(CONFIG_USE_X86INC),yes)
 DSP_SRCS-$(HAVE_SSE)  += x86/highbd_intrapred_sse2.asm
 DSP_SRCS-$(HAVE_SSE2) += x86/highbd_intrapred_sse2.asm
 endif  # CONFIG_USE_X86INC
-endif  # CONFIG_VPX_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 
 DSP_SRCS-$(HAVE_NEON_ASM) += arm/intrapred_neon_asm$(ASM)
 DSP_SRCS-$(HAVE_NEON) += arm/intrapred_neon.c
@@ -75,7 +75,7 @@ DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_ssse3.asm
 DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_bilinear_ssse3.asm
 DSP_SRCS-$(HAVE_AVX2)  += x86/aom_subpixel_8t_intrin_avx2.c
 DSP_SRCS-$(HAVE_SSSE3) += x86/aom_subpixel_8t_intrin_ssse3.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)  += x86/aom_high_subpixel_8t_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)  += x86/aom_high_subpixel_bilinear_sse2.asm
 endif
@@ -156,15 +156,15 @@ DSP_SRCS-$(HAVE_DSPR2)  += mips/loopfilter_mb_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2)  += mips/loopfilter_mb_horiz_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2)  += mips/loopfilter_mb_vert_dspr2.c
 
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_loopfilter_sse2.c
-endif  # CONFIG_VPX_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 
 DSP_SRCS-yes            += txfm_common.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/txfm_common_sse2.h
 DSP_SRCS-$(HAVE_MSA)    += mips/txfm_macros_msa.h
 # forward transform
-ifneq ($(filter yes,$(CONFIG_VP10_ENCODER)),)
+ifneq ($(filter yes,$(CONFIG_AV1_ENCODER)),)
 DSP_SRCS-yes            += fwd_txfm.c
 DSP_SRCS-yes            += fwd_txfm.h
 DSP_SRCS-$(HAVE_SSE2)   += x86/fwd_txfm_sse2.h
@@ -182,10 +182,10 @@ DSP_SRCS-$(HAVE_NEON)   += arm/fwd_txfm_neon.c
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_txfm_msa.h
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_txfm_msa.c
 DSP_SRCS-$(HAVE_MSA)    += mips/fwd_dct32x32_msa.c
-endif  # CONFIG_VP10_ENCODER
+endif  # CONFIG_AV1_ENCODER
 
 # inverse transform
-ifneq ($(filter yes,$(CONFIG_VP10)),)
+ifneq ($(filter yes,$(CONFIG_AV1)),)
 DSP_SRCS-yes            += inv_txfm.h
 DSP_SRCS-yes            += inv_txfm.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/inv_txfm_sse2.h
@@ -227,23 +227,23 @@ DSP_SRCS-$(HAVE_MSA)   += mips/idct8x8_msa.c
 DSP_SRCS-$(HAVE_MSA)   += mips/idct16x16_msa.c
 DSP_SRCS-$(HAVE_MSA)   += mips/idct32x32_msa.c
 
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_DSPR2) += mips/inv_txfm_dspr2.h
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans4_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans8_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans16_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_dspr2.c
 DSP_SRCS-$(HAVE_DSPR2) += mips/itrans32_cols_dspr2.c
-endif  # CONFIG_VPX_HIGHBITDEPTH
-endif  # CONFIG_VP10
+endif  # CONFIG_AOM_HIGHBITDEPTH
+endif  # CONFIG_AV1
 
 # quantization
-ifneq ($(filter yes, $(CONFIG_VP10_ENCODER)),)
+ifneq ($(filter yes, $(CONFIG_AV1_ENCODER)),)
 DSP_SRCS-yes            += quantize.c
 DSP_SRCS-yes            += quantize.h
 
 DSP_SRCS-$(HAVE_SSE2)   += x86/quantize_sse2.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_quantize_intrin_sse2.c
 endif
 ifeq ($(ARCH_X86_64),yes)
@@ -264,7 +264,7 @@ DSP_SRCS-$(HAVE_SSSE3) += x86/avg_ssse3_x86_64.asm
 endif
 endif
 
-endif  # CONFIG_VP10_ENCODER
+endif  # CONFIG_AV1_ENCODER
 
 ifeq ($(CONFIG_ENCODERS),yes)
 DSP_SRCS-yes            += sad.c
@@ -292,10 +292,10 @@ DSP_SRCS-$(HAVE_SSE2)   += x86/sad4d_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/sad_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/subtract_sse2.asm
 
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad4d_sse2.asm
 DSP_SRCS-$(HAVE_SSE2) += x86/highbd_sad_sse2.asm
-endif  # CONFIG_VPX_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 endif  # CONFIG_USE_X86INC
 
 endif  # CONFIG_ENCODERS
@@ -334,13 +334,13 @@ DSP_SRCS-$(HAVE_SSE)    += x86/subpel_variance_sse2.asm
 DSP_SRCS-$(HAVE_SSE2)   += x86/subpel_variance_sse2.asm  # Contains SSE2 and SSSE3
 endif  # CONFIG_USE_X86INC
 
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_variance_sse2.c
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_variance_impl_sse2.asm
 ifeq ($(CONFIG_USE_X86INC),yes)
 DSP_SRCS-$(HAVE_SSE2)   += x86/highbd_subpel_variance_impl_sse2.asm
 endif  # CONFIG_USE_X86INC
-endif  # CONFIG_VPX_HIGHBITDEPTH
+endif  # CONFIG_AOM_HIGHBITDEPTH
 endif  # CONFIG_ENCODERS
 
 DSP_SRCS-no += $(DSP_SRCS_REMOVE-yes)
diff --git a/aom_dsp/aom_dsp_common.h b/aom_dsp/aom_dsp_common.h
index 54a3c327d44c438e41d86bcc62ca7f6070edb05a..2372049931a794171940df083463e4d4fae4881c 100644
--- a/aom_dsp/aom_dsp_common.h
+++ b/aom_dsp/aom_dsp_common.h
@@ -29,7 +29,7 @@ typedef uint16_t qm_val_t;
 #define AOM_QM_BITS 6
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // Note:
 // tran_low_t  is the datatype used for final transform coefficients.
 // tran_high_t is the datatype used for intermediate transform stages.
@@ -41,7 +41,7 @@ typedef int32_t tran_low_t;
 // tran_high_t is the datatype used for intermediate transform stages.
 typedef int32_t tran_high_t;
 typedef int16_t tran_low_t;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static INLINE uint8_t clip_pixel(int val) {
   return (val > 255) ? 255 : (val < 0) ? 0 : val;
@@ -55,7 +55,7 @@ static INLINE double fclamp(double value, double low, double high) {
   return value < low ? low : (value > high ? high : value);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
   switch (bd) {
     case 8:
@@ -64,7 +64,7 @@ static INLINE uint16_t clip_pixel_highbd(int val, int bd) {
     case 12: return (uint16_t)clamp(val, 0, 4095);
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/aom_dsp/aom_dsp_rtcd_defs.pl b/aom_dsp/aom_dsp_rtcd_defs.pl
index 2c504a9fd62881bbf53a1086258ed3dcfdf04159..a01cd968c3faa7f0b46a32b1b19dbbf3e166e963 100644
--- a/aom_dsp/aom_dsp_rtcd_defs.pl
+++ b/aom_dsp/aom_dsp_rtcd_defs.pl
@@ -256,7 +256,7 @@ add_proto qw/void aom_dc_128_predictor_32x32/, "uint8_t *dst, ptrdiff_t y_stride
 specialize qw/aom_dc_128_predictor_32x32 msa neon/, "$sse2_x86inc";
 
 # High bitdepth functions
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   add_proto qw/void aom_highbd_d207_predictor_4x4/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
   specialize qw/aom_highbd_d207_predictor_4x4/;
 
@@ -448,7 +448,7 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
 
   add_proto qw/void aom_highbd_dc_128_predictor_32x32/, "uint16_t *dst, ptrdiff_t y_stride, const uint16_t *above, const uint16_t *left, int bd";
   specialize qw/aom_highbd_dc_128_predictor_32x32/;
-}  # CONFIG_VPX_HIGHBITDEPTH
+}  # CONFIG_AOM_HIGHBITDEPTH
 
 #
 # Sub Pixel Filters
@@ -495,7 +495,7 @@ specialize qw/aom_scaled_avg_horiz/;
 add_proto qw/void aom_scaled_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h";
 specialize qw/aom_scaled_avg_vert/;
 
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   #
   # Sub Pixel Filters
   #
@@ -522,7 +522,7 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
 
   add_proto qw/void aom_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
   specialize qw/aom_highbd_convolve8_avg_vert/, "$sse2_x86_64";
-}  # CONFIG_VPX_HIGHBITDEPTH
+}  # CONFIG_AOM_HIGHBITDEPTH
 
 #
 # Loopfilter
@@ -565,7 +565,7 @@ specialize qw/aom_lpf_horizontal_4 mmx neon dspr2 msa/;
 add_proto qw/void aom_lpf_horizontal_4_dual/, "uint8_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1";
 specialize qw/aom_lpf_horizontal_4_dual sse2 neon dspr2 msa/;
 
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   add_proto qw/void aom_highbd_lpf_vertical_16/, "uint16_t *s, int pitch, const uint8_t *blimit, const uint8_t *limit, const uint8_t *thresh, int bd";
   specialize qw/aom_highbd_lpf_vertical_16 sse2/;
 
@@ -598,7 +598,7 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
 
   add_proto qw/void aom_highbd_lpf_horizontal_4_dual/, "uint16_t *s, int pitch, const uint8_t *blimit0, const uint8_t *limit0, const uint8_t *thresh0, const uint8_t *blimit1, const uint8_t *limit1, const uint8_t *thresh1, int bd";
   specialize qw/aom_highbd_lpf_horizontal_4_dual sse2/;
-}  # CONFIG_VPX_HIGHBITDEPTH
+}  # CONFIG_AOM_HIGHBITDEPTH
 
 #
 # Encoder functions.
@@ -607,8 +607,8 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
 #
 # Forward transform
 #
-if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   add_proto qw/void aom_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/aom_fdct4x4 sse2/;
 
@@ -686,13 +686,13 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
 
   add_proto qw/void aom_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
   specialize qw/aom_fdct32x32_1 sse2 msa/;
-}  # CONFIG_VPX_HIGHBITDEPTH
-}  # CONFIG_VP10_ENCODER
+}  # CONFIG_AOM_HIGHBITDEPTH
+}  # CONFIG_AV1_ENCODER
 
 #
 # Inverse transform
-if (aom_config("CONFIG_VP10") eq "yes") {
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AV1") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   # Note as optimized versions of these functions are added we need to add a check to ensure
   # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
   add_proto qw/void aom_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
@@ -925,40 +925,40 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
     add_proto qw/void aom_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
     specialize qw/aom_iwht4x4_16_add msa/, "$sse2_x86inc";
   }  # CONFIG_EMULATE_HARDWARE
-}  # CONFIG_VPX_HIGHBITDEPTH
-}  # CONFIG_VP10
+}  # CONFIG_AOM_HIGHBITDEPTH
+}  # CONFIG_AV1
 
 #
 # Quantization
 #
 if (aom_config("CONFIG_AOM_QM") eq "yes") {
-  if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+  if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
     add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
 
     add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
 
-    if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+    if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
       add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
 
       add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
-    }  # CONFIG_VPX_HIGHBITDEPTH
-  }  # CONFIG_VP10_ENCODER
+    }  # CONFIG_AOM_HIGHBITDEPTH
+  }  # CONFIG_AV1_ENCODER
 } else {
-  if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+  if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
     add_proto qw/void aom_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
     specialize qw/aom_quantize_b sse2/, "$ssse3_x86_64_x86inc", "$avx_x86_64_x86inc";
 
     add_proto qw/void aom_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
     specialize qw/aom_quantize_b_32x32/, "$ssse3_x86_64_x86inc", "$avx_x86_64_x86inc";
 
-    if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+    if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
       add_proto qw/void aom_highbd_quantize_b/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
       specialize qw/aom_highbd_quantize_b sse2/;
 
       add_proto qw/void aom_highbd_quantize_b_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
       specialize qw/aom_highbd_quantize_b_32x32 sse2/;
-    }  # CONFIG_VPX_HIGHBITDEPTH
-  }  # CONFIG_VP10_ENCODER
+    }  # CONFIG_AOM_HIGHBITDEPTH
+  }  # CONFIG_AV1_ENCODER
 } # CONFIG_AOM_QM
 
 if (aom_config("CONFIG_ENCODERS") eq "yes") {
@@ -1013,7 +1013,7 @@ specialize qw/aom_sad4x4 mmx neon msa/, "$sse2_x86inc";
 #
 # Avg
 #
-if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
   add_proto qw/unsigned int aom_avg_8x8/, "const uint8_t *, int p";
   specialize qw/aom_avg_8x8 sse2 neon msa/;
 
@@ -1040,7 +1040,7 @@ if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
 
   add_proto qw/int aom_vector_var/, "int16_t const *ref, int16_t const *src, const int bwl";
   specialize qw/aom_vector_var neon sse2/;
-}  # CONFIG_VP10_ENCODER
+}  # CONFIG_AV1_ENCODER
 
 add_proto qw/unsigned int aom_sad64x64_avg/, "const uint8_t *src_ptr, int src_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *second_pred";
 specialize qw/aom_sad64x64_avg avx2 msa/, "$sse2_x86inc";
@@ -1187,7 +1187,7 @@ if (aom_config("CONFIG_INTERNAL_STATS") eq "yes") {
     specialize qw/aom_ssim_parms_16x16/, "$sse2_x86_64";
 }
 
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   #
   # Block subtraction
   #
@@ -1387,7 +1387,7 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
     add_proto qw/void aom_highbd_ssim_parms_8x8/, "const uint16_t *s, int sp, const uint16_t *r, int rp, uint32_t *sum_s, uint32_t *sum_r, uint32_t *sum_sq_s, uint32_t *sum_sq_r, uint32_t *sum_sxr";
     specialize qw/aom_highbd_ssim_parms_8x8/;
   }
-}  # CONFIG_VPX_HIGHBITDEPTH
+}  # CONFIG_AOM_HIGHBITDEPTH
 }  # CONFIG_ENCODERS
 
 if (aom_config("CONFIG_ENCODERS") eq "yes") {
@@ -1556,7 +1556,7 @@ add_proto qw/uint32_t aom_variance_halfpixvar16x16_v/, "const unsigned char *src
 add_proto qw/uint32_t aom_variance_halfpixvar16x16_hv/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int  ref_stride, uint32_t *sse";
   specialize qw/aom_variance_halfpixvar16x16_hv mmx sse2 media/;
 
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   add_proto qw/unsigned int aom_highbd_12_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, unsigned int *sse";
   specialize qw/aom_highbd_12_variance64x64 sse2/;
 
@@ -1913,7 +1913,7 @@ if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
   add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
   add_proto qw/uint32_t aom_highbd_8_sub_pixel_avg_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, uint32_t *sse, const uint8_t *second_pred";
 
-}  # CONFIG_VPX_HIGHBITDEPTH
+}  # CONFIG_AOM_HIGHBITDEPTH
 }  # CONFIG_ENCODERS
 
 1;
diff --git a/aom_dsp/avg.c b/aom_dsp/avg.c
index bbdb090d1496859647b2f7d1b64ed21aef918f88..18b5474a34996f9fdca9b68f42b448a8cbf2f4ff 100644
--- a/aom_dsp/avg.c
+++ b/aom_dsp/avg.c
@@ -189,7 +189,7 @@ void aom_minmax_8x8_c(const uint8_t *s, int p, const uint8_t *d, int dp,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 unsigned int aom_highbd_avg_8x8_c(const uint8_t *s8, int p) {
   int i, j;
   int sum = 0;
@@ -227,4 +227,4 @@ void aom_highbd_minmax_8x8_c(const uint8_t *s8, int p, const uint8_t *d8,
     }
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/fwd_txfm.c b/aom_dsp/fwd_txfm.c
index 68409f9a78a78a0532caafd11a8c89d07bf09666..4aa448a23a6450c9d131ee34971a8c8acdb572f4 100644
--- a/aom_dsp/fwd_txfm.c
+++ b/aom_dsp/fwd_txfm.c
@@ -770,7 +770,7 @@ void aom_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   output[1] = 0;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
                           int stride) {
   aom_fdct4x4_c(input, output, stride);
@@ -809,4 +809,4 @@ void aom_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
                               int stride) {
   aom_fdct32x32_1_c(input, out, stride);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/intrapred.c b/aom_dsp/intrapred.c
index e75faa61c1fa39c1ca285c7deaeced500ba726b3..83d10395d65ecd39da01e2833e45c1f9ffed3a9f 100644
--- a/aom_dsp/intrapred.c
+++ b/aom_dsp/intrapred.c
@@ -490,7 +490,7 @@ void aom_d153_predictor_4x4_c(uint8_t *dst, ptrdiff_t stride,
   DST(1, 3) = AVG3(L, K, J);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_d207_predictor(uint16_t *dst, ptrdiff_t stride,
                                          int bs, const uint16_t *above,
                                          const uint16_t *left, int bd) {
@@ -765,7 +765,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
     dst += stride;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // This serves as a wrapper function, so that all the prediction functions
 // can be unified and accessed as a pointer array. Note that the boundary
@@ -777,7 +777,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
     type##_predictor(dst, stride, size, above, left);       \
   }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define intra_pred_highbd_sized(type, size)                        \
   void aom_highbd_##type##_predictor_##size##x##size##_c(          \
       uint16_t *dst, ptrdiff_t stride, const uint16_t *above,      \
@@ -806,7 +806,7 @@ static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bs,
 #define intra_pred_no_4x4(type)                        \
   intra_pred_sized(type, 8) intra_pred_sized(type, 16) \
       intra_pred_sized(type, 32)
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 /* clang-format off */
 intra_pred_no_4x4(d207)
diff --git a/aom_dsp/inv_txfm.c b/aom_dsp/inv_txfm.c
index 0e40949b13fa015132dfb26e0726450cb86da2eb..110ba2699220a61c2d73c45fdab62c19467a90bd 100644
--- a/aom_dsp/inv_txfm.c
+++ b/aom_dsp/inv_txfm.c
@@ -1251,7 +1251,7 @@ void aom_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
@@ -2487,4 +2487,4 @@ void aom_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
     dest += stride;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/inv_txfm.h b/aom_dsp/inv_txfm.h
index c071670ed7b5f18ed252fc97c278aa56c2a160bc..e36b35fe386f418a2590c376efe911f82d7435bc 100644
--- a/aom_dsp/inv_txfm.h
+++ b/aom_dsp/inv_txfm.h
@@ -41,7 +41,7 @@ static INLINE tran_low_t dct_const_round_shift(tran_high_t input) {
   return check_range(rv);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE tran_low_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
   // For valid highbitdepth streams, intermediate stage coefficients will
@@ -64,7 +64,7 @@ static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input,
   tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
   return highbd_check_range(rv, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -96,7 +96,7 @@ void iadst4_c(const tran_low_t *input, tran_low_t *output);
 void iadst8_c(const tran_low_t *input, tran_low_t *output);
 void iadst16_c(const tran_low_t *input, tran_low_t *output);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
 void aom_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
 void aom_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
diff --git a/aom_dsp/loopfilter.c b/aom_dsp/loopfilter.c
index da9ea918e489f15b8ea3fb14843673a04d636637..e43ebe8d66dc969f61843630fb691168883ea8d7 100644
--- a/aom_dsp/loopfilter.c
+++ b/aom_dsp/loopfilter.c
@@ -19,7 +19,7 @@ static INLINE int8_t signed_char_clamp(int t) {
   return (int8_t)clamp(t, -128, 127);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE int16_t signed_char_clamp_high(int t, int bd) {
   switch (bd) {
     case 10: return (int16_t)clamp(t, -128 * 4, 128 * 4 - 1);
@@ -342,7 +342,7 @@ void aom_lpf_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
   mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // Should we apply any filter at all: 11111111 yes, 00000000 no ?
 static INLINE int8_t highbd_filter_mask(uint8_t limit, uint8_t blimit,
                                         uint16_t p3, uint16_t p2, uint16_t p1,
@@ -706,4 +706,4 @@ void aom_highbd_lpf_vertical_16_dual_c(uint16_t *s, int p,
                                        const uint8_t *thresh, int bd) {
   highbd_mb_lpf_vertical_edge_w(s, p, blimit, limit, thresh, 16, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/quantize.c b/aom_dsp/quantize.c
index 2a194c6bb4a4f8daf8f8c8c53559853d6509c9a3..1b9bbdc62964c712f9aa939c400a3349d1dcd648 100644
--- a/aom_dsp/quantize.c
+++ b/aom_dsp/quantize.c
@@ -40,7 +40,7 @@ void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -99,7 +99,7 @@ void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr, const int16_t quant,
                                   tran_low_t *qcoeff_ptr,
@@ -192,7 +192,7 @@ void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -316,7 +316,7 @@ void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_b_32x32_c(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -400,7 +400,7 @@ void aom_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs, int skip_block,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant, tran_low_t *qcoeff_ptr,
@@ -450,7 +450,7 @@ void aom_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_dc_32x32(const tran_low_t *coeff_ptr, int skip_block,
                                   const int16_t *round_ptr, const int16_t quant,
                                   tran_low_t *qcoeff_ptr,
@@ -527,7 +527,7 @@ void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                              int skip_block, const int16_t *zbin_ptr,
                              const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -632,7 +632,7 @@ void aom_quantize_b_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_b_32x32_c(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
diff --git a/aom_dsp/quantize.h b/aom_dsp/quantize.h
index cb941b5568a6e47424f193a7ac5dae9f9289bb87..ffb158db3a4f4a549f5e7d61e3540c7bf939e7be 100644
--- a/aom_dsp/quantize.h
+++ b/aom_dsp/quantize.h
@@ -38,7 +38,7 @@ void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                       uint16_t *eob_ptr, const int16_t *scan,
                       const int16_t *iscan, const qm_val_t *qm_ptr,
                       const qm_val_t *iqm_ptr);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
@@ -75,7 +75,7 @@ void aom_quantize_b_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                       tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
                       uint16_t *eob_ptr, const int16_t *scan,
                       const int16_t *iscan);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_dc(const tran_low_t *coeff_ptr, int n_coeffs,
                             int skip_block, const int16_t *round_ptr,
                             const int16_t quant_ptr, tran_low_t *qcoeff_ptr,
diff --git a/aom_dsp/sad.c b/aom_dsp/sad.c
index a94876aca5b7b4f914e258f2ee26bf36201dddc6..2945a4a61094b507fb4a412a6f0a16658aec2db1 100644
--- a/aom_dsp/sad.c
+++ b/aom_dsp/sad.c
@@ -54,7 +54,7 @@ static INLINE void avg_pred(uint8_t *comp_pred, const uint8_t *pred, int width,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
                                    int width, int height, const uint8_t *ref8,
                                    int ref_stride) {
@@ -71,7 +71,7 @@ static INLINE void highbd_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
     ref += ref_stride;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define sadMxN(m, n)                                                        \
   unsigned int aom_sad##m##x##n##_c(const uint8_t *src, int src_stride,     \
@@ -179,7 +179,7 @@ sadMxNxK(4, 4, 8)
 sadMxNx4D(4, 4)
 /* clang-format on */
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         static INLINE
     unsigned int highbd_sad(const uint8_t *a8, int a_stride, const uint8_t *b8,
                             int b_stride, int width, int height) {
@@ -317,4 +317,4 @@ highbd_sadMxNxK(4, 4, 8)
 highbd_sadMxNx4D(4, 4)
 /* clang-format on */
 
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/ssim.c b/aom_dsp/ssim.c
index c2080709c9d8b5a06d21c958db6e9ebc9c290f9a..52e263ebf30091c80363cdaa834d5fe1205c47b0 100644
--- a/aom_dsp/ssim.c
+++ b/aom_dsp/ssim.c
@@ -45,7 +45,7 @@ void aom_ssim_parms_8x8_c(const uint8_t *s, int sp, const uint8_t *r, int rp,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
                                  int rp, uint32_t *sum_s, uint32_t *sum_r,
                                  uint32_t *sum_sq_s, uint32_t *sum_sq_r,
@@ -61,7 +61,7 @@ void aom_highbd_ssim_parms_8x8_c(const uint16_t *s, int sp, const uint16_t *r,
     }
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static const int64_t cc1 = 26634;   // (64^2*(.01*255)^2
 static const int64_t cc2 = 239708;  // (64^2*(.03*255)^2
@@ -92,7 +92,7 @@ static double ssim_8x8(const uint8_t *s, int sp, const uint8_t *r, int rp) {
   return similarity(sum_s, sum_r, sum_sq_s, sum_sq_r, sum_sxr, 64);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r,
                               int rp, unsigned int bd) {
   uint32_t sum_s = 0, sum_r = 0, sum_sq_s = 0, sum_sq_r = 0, sum_sxr = 0;
@@ -102,7 +102,7 @@ static double highbd_ssim_8x8(const uint16_t *s, int sp, const uint16_t *r,
   return similarity(sum_s >> oshift, sum_r >> oshift, sum_sq_s >> (2 * oshift),
                     sum_sq_r >> (2 * oshift), sum_sxr >> (2 * oshift), 64);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // We are using a 8x8 moving window with starting location of each 8x8 window
 // on the 4x4 pixel grid. Such arrangement allows the windows to overlap
@@ -127,7 +127,7 @@ static double aom_ssim2(const uint8_t *img1, const uint8_t *img2,
   return ssim_total;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static double aom_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
                                int stride_img1, int stride_img2, int width,
                                int height, unsigned int bd) {
@@ -149,7 +149,7 @@ static double aom_highbd_ssim2(const uint8_t *img1, const uint8_t *img2,
   ssim_total /= samples;
   return ssim_total;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 double aom_calc_ssim(const YV12_BUFFER_CONFIG *source,
                      const YV12_BUFFER_CONFIG *dest, double *weight) {
@@ -436,7 +436,7 @@ double aom_get_ssim_metrics(uint8_t *img1, int img1_pitch, uint8_t *img2,
   return inconsistency_total;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
                             const YV12_BUFFER_CONFIG *dest, double *weight,
                             unsigned int bd) {
@@ -486,4 +486,4 @@ double aom_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source,
 
   return ssim_all;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/ssim.h b/aom_dsp/ssim.h
index afe9d9ac6a40a8486ba70a9ccaa86752e473defc..0b4d8f452c000eccc19336d768dd3bf33da093a5 100644
--- a/aom_dsp/ssim.h
+++ b/aom_dsp/ssim.h
@@ -80,7 +80,7 @@ double aom_psnrhvs(const YV12_BUFFER_CONFIG *source,
                    const YV12_BUFFER_CONFIG *dest, double *ssim_y,
                    double *ssim_u, double *ssim_v);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
                             const YV12_BUFFER_CONFIG *dest, double *weight,
                             unsigned int bd);
@@ -88,7 +88,7 @@ double aom_highbd_calc_ssim(const YV12_BUFFER_CONFIG *source,
 double aom_highbd_calc_ssimg(const YV12_BUFFER_CONFIG *source,
                              const YV12_BUFFER_CONFIG *dest, double *ssim_y,
                              double *ssim_u, double *ssim_v, unsigned int bd);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/aom_dsp/subtract.c b/aom_dsp/subtract.c
index 3890d46bed8e319826bbbf6409ea3a906c6a5575..da526c40b8d5ad9390fa5164b4aa899412f7be5a 100644
--- a/aom_dsp/subtract.c
+++ b/aom_dsp/subtract.c
@@ -32,7 +32,7 @@ void aom_subtract_block_c(int rows, int cols, int16_t *diff,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
                                  ptrdiff_t diff_stride, const uint8_t *src8,
                                  ptrdiff_t src_stride, const uint8_t *pred8,
@@ -52,4 +52,4 @@ void aom_highbd_subtract_block_c(int rows, int cols, int16_t *diff,
     src += src_stride;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/variance.c b/aom_dsp/variance.c
index 33675383c49ffe40798e9f85a4b2f96d212fab3c..bf97a6bd225a76227b5205726682c3de0e57d207 100644
--- a/aom_dsp/variance.c
+++ b/aom_dsp/variance.c
@@ -257,7 +257,7 @@ void aom_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_variance64(const uint8_t *a8, int a_stride,
                               const uint8_t *b8, int b_stride, int w, int h,
                               uint64_t *sse, uint64_t *sum) {
@@ -573,4 +573,4 @@ void aom_highbd_comp_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
     ref += ref_stride;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/variance.h b/aom_dsp/variance.h
index 81966fc6ba19a36eb5ecde6926e7715297f1697c..8bd10dd33174cabc32e10ed48f72fce66434b9c1 100644
--- a/aom_dsp/variance.h
+++ b/aom_dsp/variance.h
@@ -54,7 +54,7 @@ typedef unsigned int (*aom_subp_avg_variance_fn_t)(
     const uint8_t *a_ptr, int a_stride, int xoffset, int yoffset,
     const uint8_t *b_ptr, int b_stride, unsigned int *sse,
     const uint8_t *second_pred);
-#if CONFIG_VP10
+#if CONFIG_AV1
 typedef struct aom_variance_vtable {
   aom_sad_fn_t sdf;
   aom_sad_avg_fn_t sdaf;
@@ -65,7 +65,7 @@ typedef struct aom_variance_vtable {
   aom_sad_multi_fn_t sdx8f;
   aom_sad_multi_d_fn_t sdx4df;
 } aom_variance_fn_ptr_t;
-#endif  // CONFIG_VP10
+#endif  // CONFIG_AV1
 
 #ifdef __cplusplus
 }  // extern "C"
diff --git a/aom_dsp/x86/aom_asm_stubs.c b/aom_dsp/x86/aom_asm_stubs.c
index be8cba52589e368346ee19f2a53cfde71381af5d..1b71a9fd41d95ff6147decd97ddce5e7fd48f463 100644
--- a/aom_dsp/x86/aom_asm_stubs.c
+++ b/aom_dsp/x86/aom_asm_stubs.c
@@ -78,7 +78,7 @@ FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_, sse2);
 FUN_CONV_2D(, sse2);
 FUN_CONV_2D(avg_, sse2);
 
-#if CONFIG_VPX_HIGHBITDEPTH && ARCH_X86_64
+#if CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
 highbd_filter8_1dfunction aom_highbd_filter_block1d16_v8_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d16_h8_sse2;
 highbd_filter8_1dfunction aom_highbd_filter_block1d8_v8_sse2;
@@ -159,5 +159,5 @@ HIGH_FUN_CONV_1D(avg_vert, y_step_q4, filter_y, v, src - src_stride * 3, avg_,
 //                                    int w, int h, int bd);
 HIGH_FUN_CONV_2D(, sse2);
 HIGH_FUN_CONV_2D(avg_, sse2);
-#endif  // CONFIG_VPX_HIGHBITDEPTH && ARCH_X86_64
+#endif  // CONFIG_AOM_HIGHBITDEPTH && ARCH_X86_64
 #endif  // HAVE_SSE2
diff --git a/aom_dsp/x86/aom_convolve_copy_sse2.asm b/aom_dsp/x86/aom_convolve_copy_sse2.asm
index eb66e848061e70e85d381c7a6fbb8ae88f7113b8..4182c199f72cd014cc4b61f8e7606a07ca8db4d7 100644
--- a/aom_dsp/x86/aom_convolve_copy_sse2.asm
+++ b/aom_dsp/x86/aom_convolve_copy_sse2.asm
@@ -222,7 +222,7 @@ cglobal convolve_%1, 4, 7, 4+AUX_XMM_REGS, src, src_stride, \
 INIT_XMM sse2
 convolve_fn copy
 convolve_fn avg
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
 convolve_fn copy, highbd
 convolve_fn avg, highbd
 %endif
diff --git a/aom_dsp/x86/convolve.h b/aom_dsp/x86/convolve.h
index cf436543f25f58eb680cc3c644292d958c07bca3..dd453b09a52e08258aa856027d6e1140cc084dfa 100644
--- a/aom_dsp/x86/convolve.h
+++ b/aom_dsp/x86/convolve.h
@@ -104,7 +104,7 @@ typedef void filter8_1dfunction(const uint8_t *src_ptr, ptrdiff_t src_pitch,
     }                                                                         \
   }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 
 typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
                                        const ptrdiff_t src_pitch,
@@ -206,6 +206,6 @@ typedef void highbd_filter8_1dfunction(const uint16_t *src_ptr,
                                     w, h, bd);                                \
     }                                                                         \
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #endif  // VPX_DSP_X86_CONVOLVE_H_
diff --git a/aom_dsp/x86/fwd_txfm_sse2.c b/aom_dsp/x86/fwd_txfm_sse2.c
index 2afb212e946d0def5fc7d2425e7f34d8d3db26a9..4dcc67cc3f192bcb133b13542226488f3de47f45 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.c
+++ b/aom_dsp/x86/fwd_txfm_sse2.c
@@ -247,7 +247,7 @@ void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
 #define FDCT4x4_2D aom_highbd_fdct4x4_sse2
 #define FDCT8x8_2D aom_highbd_fdct8x8_sse2
@@ -269,4 +269,4 @@ void aom_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/fwd_txfm_sse2.h b/aom_dsp/x86/fwd_txfm_sse2.h
index 6149938dc93dd37a4d0729bd6d8e7928af8133c0..8e1a007acef7d680dd6d23494dba331d4c4f8884 100644
--- a/aom_dsp/x86/fwd_txfm_sse2.h
+++ b/aom_dsp/x86/fwd_txfm_sse2.h
@@ -245,7 +245,7 @@ static INLINE int k_check_epi32_overflow_32(
 }
 
 static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const __m128i zero = _mm_setzero_si128();
   const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
   __m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -254,11 +254,11 @@ static INLINE void store_output(const __m128i *poutput, tran_low_t *dst_ptr) {
   _mm_store_si128((__m128i *)(dst_ptr + 4), out1);
 #else
   _mm_store_si128((__m128i *)(dst_ptr), *poutput);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
 static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const __m128i zero = _mm_setzero_si128();
   const __m128i sign_bits = _mm_cmplt_epi16(*poutput, zero);
   __m128i out0 = _mm_unpacklo_epi16(*poutput, sign_bits);
@@ -267,7 +267,7 @@ static INLINE void storeu_output(const __m128i *poutput, tran_low_t *dst_ptr) {
   _mm_storeu_si128((__m128i *)(dst_ptr + 4), out1);
 #else
   _mm_storeu_si128((__m128i *)(dst_ptr), *poutput);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
 static INLINE __m128i mult_round_shift(const __m128i *pin0, const __m128i *pin1,
diff --git a/aom_dsp/x86/highbd_quantize_intrin_sse2.c b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
index 975dde788b24e961387f7dcbe6e07d0b50895ad2..598f3d23a1acfeb3fbb414b34346f731f20c1b44 100644
--- a/aom_dsp/x86/highbd_quantize_intrin_sse2.c
+++ b/aom_dsp/x86/highbd_quantize_intrin_sse2.c
@@ -15,7 +15,7 @@
 #include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_highbd_quantize_b_sse2(const tran_low_t *coeff_ptr, intptr_t count,
                                 int skip_block, const int16_t *zbin_ptr,
                                 const int16_t *round_ptr,
diff --git a/aom_dsp/x86/inv_txfm_sse2.c b/aom_dsp/x86/inv_txfm_sse2.c
index 1a8359fe024acc10f05da7ee2005e761d3c01765..548929dccbb692a227abae0bdd2b5a713eb90ed4 100644
--- a/aom_dsp/x86/inv_txfm_sse2.c
+++ b/aom_dsp/x86/inv_txfm_sse2.c
@@ -3473,7 +3473,7 @@ void aom_idct32x32_1_add_sse2(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   __m128i ubounded, retval;
   const __m128i zero = _mm_set1_epi16(0);
@@ -4035,4 +4035,4 @@ void aom_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
     }
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_dsp/x86/inv_txfm_sse2.h b/aom_dsp/x86/inv_txfm_sse2.h
index ddb680fc9de7c7f37c786890ff1dbe754c329c08..dbe233de6759fa629fd1612329564881aa4818f9 100644
--- a/aom_dsp/x86/inv_txfm_sse2.h
+++ b/aom_dsp/x86/inv_txfm_sse2.h
@@ -94,7 +94,7 @@ static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
 // Function to allow 8 bit optimisations to be used when profile 0 is used with
 // highbitdepth enabled
 static INLINE __m128i load_input_data(const tran_low_t *data) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
                         data[6], data[7]);
 #else
diff --git a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
index a835161ea02de3d2a9db9bcf5a72392402239091..3890926678d5d12d60a4df6a0ced2a8922347f8f 100644
--- a/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
+++ b/aom_dsp/x86/inv_txfm_ssse3_x86_64.asm
@@ -220,7 +220,7 @@ cglobal idct8x8_64_add, 3, 5, 13, input, output, stride
   mova    m12, [pw_11585x2]
 
   lea      r3, [2 * strideq]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova     m0, [inputq +   0]
   packssdw m0, [inputq +  16]
   mova     m1, [inputq +  32]
@@ -271,7 +271,7 @@ cglobal idct8x8_12_add, 3, 5, 13, input, output, stride
 
   lea        r3, [2 * strideq]
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova       m0, [inputq +   0]
   packssdw   m0, [inputq +  16]
   mova       m1, [inputq +  32]
@@ -793,7 +793,7 @@ idct32x32_34:
   lea             r4, [rsp + transposed_in]
 
 idct32x32_34_transpose:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0, [r3 +       0]
   packssdw        m0, [r3 +      16]
   mova            m1, [r3 + 32 *  4]
@@ -1223,7 +1223,7 @@ idct32x32_135:
   mov             r7, 2
 
 idct32x32_135_transpose:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0, [r3 +       0]
   packssdw        m0, [r3 +      16]
   mova            m1, [r3 + 32 *  4]
@@ -1261,7 +1261,7 @@ idct32x32_135_transpose:
   mova [r4 + 16 * 6], m6
   mova [r4 + 16 * 7], m7
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   add             r3, 32
 %else
   add             r3, 16
@@ -1272,7 +1272,7 @@ idct32x32_135_transpose:
 
   IDCT32X32_135 16*0, 16*32, 16*64, 16*96
   lea            stp, [stp + 16 * 8]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea         inputq, [inputq + 32 * 32]
 %else
   lea         inputq, [inputq + 16 * 32]
@@ -1687,7 +1687,7 @@ idct32x32_1024:
   mov             r7, 4
 
 idct32x32_1024_transpose:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0, [r3 +       0]
   packssdw        m0, [r3 +      16]
   mova            m1, [r3 + 32 *  4]
@@ -1725,7 +1725,7 @@ idct32x32_1024_transpose:
   mova [r4 + 16 * 5], m5
   mova [r4 + 16 * 6], m6
   mova [r4 + 16 * 7], m7
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   add             r3, 32
 %else
   add             r3, 16
@@ -1737,7 +1737,7 @@ idct32x32_1024_transpose:
   IDCT32X32_1024 16*0, 16*32, 16*64, 16*96
 
   lea            stp, [stp + 16 * 8]
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea         inputq, [inputq + 32 * 32]
 %else
   lea         inputq, [inputq + 16 * 32]
diff --git a/aom_dsp/x86/inv_wht_sse2.asm b/aom_dsp/x86/inv_wht_sse2.asm
index eec504755016fb229a4cb97dd89f23d6f4b143bd..ee805633c8085c9acb3acfdfdecf4bc3eba302a8 100644
--- a/aom_dsp/x86/inv_wht_sse2.asm
+++ b/aom_dsp/x86/inv_wht_sse2.asm
@@ -82,7 +82,7 @@ SECTION .text
 
 INIT_XMM sse2
 cglobal iwht4x4_16_add, 3, 3, 7, input, output, stride
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova            m0,        [inputq +  0]
   packssdw        m0,        [inputq + 16]
   mova            m1,        [inputq + 32]
diff --git a/aom_dsp/x86/quantize_avx_x86_64.asm b/aom_dsp/x86/quantize_avx_x86_64.asm
index b3d78c482f558cda1f3808d76431a8698e06abf5..b74d6ea6886dd01745f8411235920325e3d8e7a1 100644
--- a/aom_dsp/x86/quantize_avx_x86_64.asm
+++ b/aom_dsp/x86/quantize_avx_x86_64.asm
@@ -41,7 +41,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   mova                            m0, [zbinq]              ; m0 = zbin
 
   ; Get DC and first 15 AC coeffs - in this special case, that is all.
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; coeff stored as 32bit numbers but we process them as 16 bit numbers
   mova                            m9, [coeffq]
   packssdw                        m9, [coeffq+16]          ; m9 = c[i]
@@ -73,7 +73,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   ptest                          m14, m14
   jnz .single_nonzero
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova                       [r1   ], ymm5
   mova                       [r1+32], ymm5
   mova                       [r2   ], ymm5
@@ -121,7 +121,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   pand                            m8, m7
   pand                           m13, m12
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -142,7 +142,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   punpckhqdq                      m3, m3
   pmullw                         m13, m3                   ; dqc[i] = qc[i] * q
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; Store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -226,7 +226,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
 
   DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                         coeffq, [  coeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -239,7 +239,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   neg                        ncoeffq
 
   ; get DC and first 15 AC coeffs
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; coeff stored as 32bit numbers & require 16bit numbers
   mova                            m9, [coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [coeffq+ncoeffq*4+16]
@@ -261,7 +261,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   ptest                          m14, m14
   jnz .first_nonzero
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova        [qcoeffq+ncoeffq*4   ], ymm5
   mova        [qcoeffq+ncoeffq*4+32], ymm5
   mova       [dqcoeffq+ncoeffq*4   ], ymm5
@@ -299,7 +299,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   pand                            m8, m7
   pand                           m13, m12
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -330,7 +330,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   psignw                         m13, m10
 %endif
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m8
   punpckhwd                       m6, m8, m6
@@ -360,7 +360,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
 
 .ac_only_loop:
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; pack coeff from 32bit to 16bit array
   mova                            m9, [coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [coeffq+ncoeffq*4+16]
@@ -382,7 +382,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   ptest                          m14, m14
   jnz .rest_nonzero
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova        [qcoeffq+ncoeffq*4+ 0], ymm5
   mova        [qcoeffq+ncoeffq*4+32], ymm5
   mova       [dqcoeffq+ncoeffq*4+ 0], ymm5
@@ -421,7 +421,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   pand                           m14, m7
   pand                           m13, m12
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m14
   punpckhwd                       m6, m14, m6
@@ -451,7 +451,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
   psignw                         m13, m10
 %endif
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pcmpgtw                         m6, m5, m14
   punpckhwd                       m6, m14, m6
@@ -507,7 +507,7 @@ DEFINE_ARGS coeff, ncoeff, skip, zbin, round, quant, shift, \
 
 DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
 %else
@@ -519,7 +519,7 @@ DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
   pxor                            m7, m7
 
 .blank_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova       [dqcoeffq+ncoeffq*4+ 0], ymm7
   mova       [dqcoeffq+ncoeffq*4+32], ymm7
   mova        [qcoeffq+ncoeffq*4+ 0], ymm7
diff --git a/aom_dsp/x86/quantize_sse2.c b/aom_dsp/x86/quantize_sse2.c
index 3a2655fe3cfc667e4cf0cb07349ca26f8d7fc0e1..39ce529d26c13da5957a722d1d3d738af9f270ed 100644
--- a/aom_dsp/x86/quantize_sse2.c
+++ b/aom_dsp/x86/quantize_sse2.c
@@ -16,7 +16,7 @@
 #include "aom/aom_integer.h"
 
 static INLINE __m128i load_coefficients(const tran_low_t* coeff_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   return _mm_setr_epi16((int16_t)coeff_ptr[0], (int16_t)coeff_ptr[1],
                         (int16_t)coeff_ptr[2], (int16_t)coeff_ptr[3],
                         (int16_t)coeff_ptr[4], (int16_t)coeff_ptr[5],
@@ -28,7 +28,7 @@ static INLINE __m128i load_coefficients(const tran_low_t* coeff_ptr) {
 
 static INLINE void store_coefficients(__m128i coeff_vals,
                                       tran_low_t* coeff_ptr) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   __m128i one = _mm_set1_epi16(1);
   __m128i coeff_vals_hi = _mm_mulhi_epi16(coeff_vals, one);
   __m128i coeff_vals_lo = _mm_mullo_epi16(coeff_vals, one);
diff --git a/aom_dsp/x86/quantize_ssse3_x86_64.asm b/aom_dsp/x86/quantize_ssse3_x86_64.asm
index f97ee57eab7c76e9b2c64067ef4889f098192ce0..4503370b544b55deced0f27cb5dc27833b90df67 100644
--- a/aom_dsp/x86/quantize_ssse3_x86_64.asm
+++ b/aom_dsp/x86/quantize_ssse3_x86_64.asm
@@ -53,7 +53,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
 %endif
   pxor                            m5, m5                   ; m5 = dedicated zero
   DEFINE_ARGS coeff, ncoeff, d1, qcoeff, dqcoeff, iscan, d2, d3, d4, d5, eob
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                         coeffq, [  coeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
@@ -66,7 +66,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   neg                        ncoeffq
 
   ; get DC and first 15 AC coeffs
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; coeff stored as 32bit numbers & require 16bit numbers
   mova                            m9, [  coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [  coeffq+ncoeffq*4+16]
@@ -96,7 +96,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   psignw                         m13, m10                  ; m13 = reinsert sign
   pand                            m8, m7
   pand                           m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   mova                           m11, m8
   mova                            m6, m8
@@ -131,7 +131,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   psignw                          m8, m9
   psignw                         m13, m10
 %endif
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   mova                            m11, m8
   mova                            m6, m8
@@ -166,7 +166,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   jz .accumulate_eob
 
 .ac_only_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; pack coeff from 32bit to 16bit array
   mova                            m9, [  coeffq+ncoeffq*4+ 0]
   packssdw                        m9, [  coeffq+ncoeffq*4+16]
@@ -198,7 +198,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   psignw                         m13, m10                  ; m13 = reinsert sign
   pand                           m14, m7
   pand                           m13, m12
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   pxor                           m11, m11
   mova                           m11, m14
@@ -233,7 +233,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   psignw                         m14, m9
   psignw                         m13, m10
 %endif
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   ; store 16bit numbers as 32bit numbers in array pointed to by qcoeff
   mova                           m11, m14
   mova                            m6, m14
@@ -271,7 +271,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
 %ifidn %1, b_32x32
   jmp .accumulate_eob
 .skip_iter:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova        [qcoeffq+ncoeffq*4+ 0], m5
   mova        [qcoeffq+ncoeffq*4+16], m5
   mova        [qcoeffq+ncoeffq*4+32], m5
@@ -310,7 +310,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   mov                             r2, qcoeffmp
   mov                             r3, eobmp
   DEFINE_ARGS dqcoeff, ncoeff, qcoeff, eob
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   lea                       dqcoeffq, [dqcoeffq+ncoeffq*4]
   lea                        qcoeffq, [ qcoeffq+ncoeffq*4]
 %else
@@ -320,7 +320,7 @@ cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
   neg                        ncoeffq
   pxor                            m7, m7
 .blank_loop:
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   mova       [dqcoeffq+ncoeffq*4+ 0], m7
   mova       [dqcoeffq+ncoeffq*4+16], m7
   mova       [dqcoeffq+ncoeffq*4+32], m7
diff --git a/aom_mem/aom_mem.c b/aom_mem/aom_mem.c
index 68693527862bd6e86ebae4e2b795e314172a993c..cbfc704e685e6b6dc3973137dae826fb9b0ee481 100644
--- a/aom_mem/aom_mem.c
+++ b/aom_mem/aom_mem.c
@@ -87,11 +87,11 @@ void aom_free(void *memblk) {
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void *aom_memset16(void *dest, int val, size_t length) {
   size_t i;
   uint16_t *dest16 = (uint16_t *)dest;
   for (i = 0; i < length; i++) *dest16++ = val;
   return dest;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/aom_mem/aom_mem.h b/aom_mem/aom_mem.h
index 32744eb6d4db36e9062f46f04f61d3c13be41927..ae5fe3a11e3efe4d61e25d049ac3824214b1f6f3 100644
--- a/aom_mem/aom_mem.h
+++ b/aom_mem/aom_mem.h
@@ -30,7 +30,7 @@ void *aom_calloc(size_t num, size_t size);
 void *aom_realloc(void *memblk, size_t size);
 void aom_free(void *memblk);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void *aom_memset16(void *dest, int val, size_t length);
 #endif
 
diff --git a/aom_ports/mem.h b/aom_ports/mem.h
index 08337a787113b1b0967ee0bccf76f82670251b81..c06c50cf25e260f07bab4809c52d2f862657521c 100644
--- a/aom_ports/mem.h
+++ b/aom_ports/mem.h
@@ -44,9 +44,9 @@
 #define ALIGN_POWER_OF_TWO(value, n) \
   (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define CONVERT_TO_SHORTPTR(x) ((uint16_t *)(((uintptr_t)x) << 1))
 #define CONVERT_TO_BYTEPTR(x) ((uint8_t *)(((uintptr_t)x) >> 1))
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #endif  // VPX_PORTS_MEM_H_
diff --git a/aom_scale/aom_scale_rtcd.pl b/aom_scale/aom_scale_rtcd.pl
index 856abcd8e9da7430ee6e0c85ca0231bd4c280482..6a3c65c75da9c806897dd80fb5543a127c643aca 100644
--- a/aom_scale/aom_scale_rtcd.pl
+++ b/aom_scale/aom_scale_rtcd.pl
@@ -22,7 +22,7 @@ add_proto qw/void aom_yv12_copy_frame/, "const struct yv12_buffer_config *src_yb
 
 add_proto qw/void aom_yv12_copy_y/, "const struct yv12_buffer_config *src_ybc, struct yv12_buffer_config *dst_ybc";
 
-if (aom_config("CONFIG_VP10") eq "yes") {
+if (aom_config("CONFIG_AV1") eq "yes") {
     add_proto qw/void aom_extend_frame_borders/, "struct yv12_buffer_config *ybf";
     specialize qw/aom_extend_frame_borders dspr2/;
 
diff --git a/aom_scale/generic/yv12config.c b/aom_scale/generic/yv12config.c
index b7897227f9bde3ca9d1e2cab8f35731f2de26fa2..554965b1db6750cd5d22aa9813a53cbdee4d0ebc 100644
--- a/aom_scale/generic/yv12config.c
+++ b/aom_scale/generic/yv12config.c
@@ -114,7 +114,7 @@ int aom_yv12_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
   return -2;
 }
 
-#if CONFIG_VP10
+#if CONFIG_AV1
 // TODO(jkoleszar): Maybe replace this with struct aom_image
 
 int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
@@ -136,7 +136,7 @@ int aom_free_frame_buffer(YV12_BUFFER_CONFIG *ybf) {
 
 int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                              int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                              int use_highbitdepth,
 #endif
                              int border, int byte_alignment,
@@ -166,21 +166,21 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
     const uint64_t alpha_plane_size =
         (alpha_height + 2 * alpha_border_h) * (uint64_t)alpha_stride +
         byte_alignment;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint64_t frame_size =
         (1 + use_highbitdepth) *
         (yplane_size + 2 * uvplane_size + alpha_plane_size);
 #else
     const uint64_t frame_size =
         yplane_size + 2 * uvplane_size + alpha_plane_size;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #else
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint64_t frame_size =
         (1 + use_highbitdepth) * (yplane_size + 2 * uvplane_size);
 #else
     const uint64_t frame_size = yplane_size + 2 * uvplane_size;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_ALPHA
 
     uint8_t *buf = NULL;
@@ -251,7 +251,7 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
     ybf->subsampling_y = ss_y;
 
     buf = ybf->buffer_alloc;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (use_highbitdepth) {
       // Store uint16 addresses when using 16bit framebuffers
       buf = CONVERT_TO_BYTEPTR(ybf->buffer_alloc);
@@ -259,7 +259,7 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
     } else {
       ybf->flags = 0;
     }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     ybf->y_buffer = (uint8_t *)yv12_align_addr(
         buf + (border * y_stride) + border, aom_byte_align);
@@ -288,14 +288,14 @@ int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
 
 int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                            int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                            int use_highbitdepth,
 #endif
                            int border, int byte_alignment) {
   if (ybf) {
     aom_free_frame_buffer(ybf);
     return aom_realloc_frame_buffer(ybf, width, height, ss_x, ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                     use_highbitdepth,
 #endif
                                     border, byte_alignment, NULL, NULL, NULL);
diff --git a/aom_scale/generic/yv12extend.c b/aom_scale/generic/yv12extend.c
index 2dacbed901e8c2442d2aac67ce94d097da2123c2..017f6d1a9696dd51622f74cdcbf95209e38cac79 100644
--- a/aom_scale/generic/yv12extend.c
+++ b/aom_scale/generic/yv12extend.c
@@ -16,7 +16,7 @@
 #include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_scale/yv12config.h"
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #include "av1/common/common.h"
 #endif
 
@@ -60,7 +60,7 @@ static void extend_plane(uint8_t *const src, int src_stride, int width,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void extend_plane_high(uint8_t *const src8, int src_stride, int width,
                               int height, int extend_top, int extend_left,
                               int extend_bottom, int extend_right) {
@@ -112,7 +112,7 @@ void aom_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
   assert(ybf->y_height - ybf->y_crop_height >= 0);
   assert(ybf->y_width - ybf->y_crop_width >= 0);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
     extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
                       ybf->y_crop_height, ybf->border, ybf->border,
@@ -147,7 +147,7 @@ void aom_yv12_extend_frame_borders_c(YV12_BUFFER_CONFIG *ybf) {
                uv_border + ybf->uv_width - ybf->uv_crop_width);
 }
 
-#if CONFIG_VP10
+#if CONFIG_AV1
 static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) {
   const int c_w = ybf->uv_crop_width;
   const int c_h = ybf->uv_crop_height;
@@ -163,7 +163,7 @@ static void extend_frame(YV12_BUFFER_CONFIG *const ybf, int ext_size) {
   assert(ybf->y_height - ybf->y_crop_height >= 0);
   assert(ybf->y_width - ybf->y_crop_width >= 0);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (ybf->flags & YV12_FLAG_HIGHBITDEPTH) {
     extend_plane_high(ybf->y_buffer, ybf->y_stride, ybf->y_crop_width,
                       ybf->y_crop_height, ext_size, ext_size,
@@ -197,14 +197,14 @@ void aom_extend_frame_inner_borders_c(YV12_BUFFER_CONFIG *ybf) {
   extend_frame(ybf, inner_bw);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void memcpy_short_addr(uint8_t *dst8, const uint8_t *src8, int num) {
   uint16_t *dst = CONVERT_TO_SHORTPTR(dst8);
   uint16_t *src = CONVERT_TO_SHORTPTR(src8);
   memcpy(dst, src, num * sizeof(uint16_t));
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
-#endif  // CONFIG_VP10
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+#endif  // CONFIG_AV1
 
 // Copies the source image into the destination image and updates the
 // destination's UMV borders.
@@ -223,7 +223,7 @@ void aom_yv12_copy_frame_c(const YV12_BUFFER_CONFIG *src_ybc,
   assert(src_ybc->y_height == dst_ybc->y_height);
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
     assert(dst_ybc->flags & YV12_FLAG_HIGHBITDEPTH);
     for (row = 0; row < src_ybc->y_height; ++row) {
@@ -290,7 +290,7 @@ void aom_yv12_copy_y_c(const YV12_BUFFER_CONFIG *src_ybc,
   const uint8_t *src = src_ybc->y_buffer;
   uint8_t *dst = dst_ybc->y_buffer;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src_ybc->flags & YV12_FLAG_HIGHBITDEPTH) {
     const uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
     uint16_t *dst16 = CONVERT_TO_SHORTPTR(dst);
diff --git a/aom_scale/yv12config.h b/aom_scale/yv12config.h
index 3b0e044b7fe1567fc614b74310c49dd06a7e8da3..9a2fce36df3f84895602fc69be2005b342de2a28 100644
--- a/aom_scale/yv12config.h
+++ b/aom_scale/yv12config.h
@@ -75,7 +75,7 @@ int aom_yv12_de_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf);
 
 int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                            int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                            int use_highbitdepth,
 #endif
                            int border, int byte_alignment);
@@ -89,7 +89,7 @@ int aom_alloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
 // on failure.
 int aom_realloc_frame_buffer(YV12_BUFFER_CONFIG *ybf, int width, int height,
                              int ss_x, int ss_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                              int use_highbitdepth,
 #endif
                              int border, int byte_alignment,
diff --git a/aomdec.c b/aomdec.c
index 282418997861d0a2aaa05c988bd52cc09a6bd7b4..9230b3ce77423941bfa571add25fddaf05c5d217 100644
--- a/aomdec.c
+++ b/aomdec.c
@@ -29,7 +29,7 @@
 #include "aom_ports/mem_ops.h"
 #include "aom_ports/aom_timer.h"
 
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
 #include "aom/vp8dx.h"
 #endif
 
@@ -89,7 +89,7 @@ static const arg_def_t fb_arg =
     ARG_DEF(NULL, "frame-buffers", 1, "Number of frame buffers to use");
 static const arg_def_t md5arg =
     ARG_DEF(NULL, "md5", 0, "Compute the MD5 sum of the decoded frame");
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const arg_def_t outbitdeptharg =
     ARG_DEF(NULL, "output-bit-depth", 1, "Output bit-depth for decoded frames");
 #endif
@@ -100,7 +100,7 @@ static const arg_def_t *all_args[] = {
   &progressarg, &limitarg, &skiparg, &postprocarg, &summaryarg, &outputfile,
   &threadsarg, &frameparallelarg, &verbosearg, &scalearg, &fb_arg,
   &md5arg, &error_concealment, &continuearg,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   &outbitdeptharg,
 #endif
   NULL
@@ -110,7 +110,7 @@ static const arg_def_t *all_args[] = {
 #if CONFIG_LIBYUV
 static INLINE int libyuv_scale(aom_image_t *src, aom_image_t *dst,
                                FilterModeEnum mode) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src->fmt == VPX_IMG_FMT_I42016) {
     assert(dst->fmt == VPX_IMG_FMT_I42016);
     return I420Scale_16(
@@ -253,7 +253,7 @@ static void update_image_md5(const aom_image_t *img, const int planes[3],
 static void write_image_file(const aom_image_t *img, const int planes[3],
                              FILE *file) {
   int i, y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int bytes_per_sample = ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) ? 2 : 1);
 #else
   const int bytes_per_sample = 1;
@@ -454,7 +454,7 @@ static FILE *open_outfile(const char *name) {
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int img_shifted_realloc_required(const aom_image_t *img,
                                         const aom_image_t *shifted,
                                         aom_img_fmt_t required_fmt) {
@@ -487,14 +487,14 @@ static int main_loop(int argc, const char **argv_) {
   int opt_yv12 = 0;
   int opt_i420 = 0;
   aom_codec_dec_cfg_t cfg = { 0, 0, 0 };
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   unsigned int output_bit_depth = 0;
 #endif
   int frames_corrupted = 0;
   int dec_flags = 0;
   int do_scale = 0;
   aom_image_t *scaled_img = NULL;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   aom_image_t *img_shifted = NULL;
 #endif
   int frame_avail, got_data, flush_decoder = 0;
@@ -561,7 +561,7 @@ static int main_loop(int argc, const char **argv_) {
       summary = 1;
     else if (arg_match(&arg, &threadsarg, argi))
       cfg.threads = arg_parse_uint(&arg);
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
     else if (arg_match(&arg, &frameparallelarg, argi))
       frame_parallel = 1;
 #endif
@@ -573,7 +573,7 @@ static int main_loop(int argc, const char **argv_) {
       num_external_frame_buffers = arg_parse_uint(&arg);
     else if (arg_match(&arg, &continuearg, argi))
       keep_going = 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     else if (arg_match(&arg, &outbitdeptharg, argi)) {
       output_bit_depth = arg_parse_uint(&arg);
     }
@@ -806,7 +806,7 @@ static int main_loop(int argc, const char **argv_) {
 #endif
         }
       }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       // Default to codec bit depth if output bit depth not set
       if (!output_bit_depth && single_file && !do_md5) {
         output_bit_depth = img->bit_depth;
@@ -941,7 +941,7 @@ fail:
   if (input.aom_input_ctx->file_type != FILE_TYPE_WEBM) free(buf);
 
   if (scaled_img) aom_img_free(scaled_img);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (img_shifted) aom_img_free(img_shifted);
 #endif
 
diff --git a/aomenc.c b/aomenc.c
index c6e2ec02360feb59d3a5327e90d642b5ac5e6352..552126c5748e00fe501239e6cfebee2bd3704e63 100644
--- a/aomenc.c
+++ b/aomenc.c
@@ -33,10 +33,10 @@
 #include "./ivfenc.h"
 #include "./tools_common.h"
 
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 #include "aom/vp8cx.h"
 #endif
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
 #include "aom/vp8dx.h"
 #endif
 
@@ -195,7 +195,7 @@ static const arg_def_t disable_warning_prompt =
     ARG_DEF("y", "disable-warning-prompt", 0,
             "Display warnings, but do not prompt user to continue.");
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const arg_def_t test16bitinternalarg = ARG_DEF(
     NULL, "test-16bit-internal", 0, "Force use of 16 bit internal buffer");
 #endif
@@ -268,7 +268,7 @@ static const arg_def_t *global_args[] = { &use_yv12,
                                           &timebase,
                                           &framerate,
                                           &error_resilient,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                           &test16bitinternalarg,
 #endif
                                           &lag_in_frames,
@@ -357,7 +357,7 @@ static const arg_def_t cq_level =
 static const arg_def_t max_intra_rate_pct =
     ARG_DEF(NULL, "max-intra-rate", 1, "Max I-frame bitrate (pct)");
 
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 static const arg_def_t cpu_used_vp9 =
     ARG_DEF(NULL, "cpu-used", 1, "CPU Used (-8..8)");
 static const arg_def_t tile_cols =
@@ -411,7 +411,7 @@ static const arg_def_t input_color_space =
     ARG_DEF_ENUM(NULL, "color-space", 1, "The color space of input content:",
                  color_space_enum);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const struct arg_enum_list bitdepth_enum[] = {
   { "8", VPX_BITS_8 }, { "10", VPX_BITS_10 }, { "12", VPX_BITS_12 }, { NULL, 0 }
 };
@@ -434,9 +434,9 @@ static const arg_def_t tune_content = ARG_DEF_ENUM(
     NULL, "tune-content", 1, "Tune content type", tune_content_enum);
 #endif
 
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 /* clang-format off */
-static const arg_def_t *vp10_args[] = {
+static const arg_def_t *av1_args[] = {
   &cpu_used_vp9,            &auto_altref,      &sharpness,
   &static_thresh,           &tile_cols,        &tile_rows,
   &arnr_maxframes,          &arnr_strength,    &arnr_type,
@@ -449,7 +449,7 @@ static const arg_def_t *vp10_args[] = {
   &noise_sens,              &tune_content,     &input_color_space,
   &min_gf_interval,         &max_gf_interval,  NULL
 };
-static const int vp10_arg_ctrl_map[] = {
+static const int av1_arg_ctrl_map[] = {
   VP8E_SET_CPUUSED,                 VP8E_SET_ENABLEAUTOALTREF,
   VP8E_SET_SHARPNESS,               VP8E_SET_STATIC_THRESHOLD,
   VP9E_SET_TILE_COLUMNS,            VP9E_SET_TILE_ROWS,
@@ -490,9 +490,9 @@ void usage_exit(void) {
   arg_show_usage(stderr, rc_twopass_args);
   fprintf(stderr, "\nKeyframe Placement Options:\n");
   arg_show_usage(stderr, kf_args);
-#if CONFIG_VP10_ENCODER
-  fprintf(stderr, "\nVP10 Specific Options:\n");
-  arg_show_usage(stderr, vp10_args);
+#if CONFIG_AV1_ENCODER
+  fprintf(stderr, "\nAV1 Specific Options:\n");
+  arg_show_usage(stderr, av1_args);
 #endif
   fprintf(stderr,
           "\nStream timebase (--timebase):\n"
@@ -514,7 +514,7 @@ void usage_exit(void) {
 
 #define mmin(a, b) ((a) < (b) ? (a) : (b))
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void find_mismatch_high(const aom_image_t *const img1,
                                const aom_image_t *const img2, int yloc[4],
                                int uloc[4], int vloc[4]) {
@@ -711,7 +711,7 @@ static int compare_img(const aom_image_t *const img1,
   match &= (img1->fmt == img2->fmt);
   match &= (img1->d_w == img2->d_w);
   match &= (img1->d_h == img2->d_h);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (img1->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
     l_w *= 2;
     c_w *= 2;
@@ -737,8 +737,8 @@ static int compare_img(const aom_image_t *const img1,
 }
 
 #define NELEMENTS(x) (sizeof(x) / sizeof(x[0]))
-#if CONFIG_VP10_ENCODER
-#define ARG_CTRL_CNT_MAX NELEMENTS(vp10_arg_ctrl_map)
+#if CONFIG_AV1_ENCODER
+#define ARG_CTRL_CNT_MAX NELEMENTS(av1_arg_ctrl_map)
 #endif
 
 #if !CONFIG_WEBM_IO
@@ -761,7 +761,7 @@ struct stream_config {
   int arg_ctrl_cnt;
   int write_webm;
   int have_kf_max_dist;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   // whether to use 16bit internal buffers
   int use_16bit_internal;
 #endif
@@ -898,7 +898,7 @@ static void parse_global_config(struct VpxEncoderConfig *global, char **argv) {
   }
   /* Validate global config */
   if (global->passes == 0) {
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
     // Make default VP9 passes = 2 until there is a better quality 1-pass
     // encoder
     if (global->codec != NULL && global->codec->name != NULL)
@@ -1032,18 +1032,18 @@ static int parse_stream_params(struct VpxEncoderConfig *global,
   static const int *ctrl_args_map = NULL;
   struct stream_config *config = &stream->config;
   int eos_mark_found = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int test_16bit_internal = 0;
 #endif
 
   // Handle codec specific options
   if (0) {
-#if CONFIG_VP10_ENCODER
-  } else if (strcmp(global->codec->name, "vp10") == 0) {
+#if CONFIG_AV1_ENCODER
+  } else if (strcmp(global->codec->name, "av1") == 0) {
     // TODO(jingning): Reuse VP9 specific encoder configuration parameters.
-    // Consider to expand this set for VP10 encoder control.
-    ctrl_args = vp10_args;
-    ctrl_args_map = vp10_arg_ctrl_map;
+    // Consider expanding this set for AV1 encoder control.
+    ctrl_args = av1_args;
+    ctrl_args_map = av1_arg_ctrl_map;
 #endif
   }
 
@@ -1085,7 +1085,7 @@ static int parse_stream_params(struct VpxEncoderConfig *global,
       config->cfg.g_w = arg_parse_uint(&arg);
     } else if (arg_match(&arg, &height, argi)) {
       config->cfg.g_h = arg_parse_uint(&arg);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else if (arg_match(&arg, &bitdeptharg, argi)) {
       config->cfg.g_bit_depth = arg_parse_enum_or_int(&arg);
     } else if (arg_match(&arg, &inbitdeptharg, argi)) {
@@ -1158,10 +1158,10 @@ static int parse_stream_params(struct VpxEncoderConfig *global,
       config->have_kf_max_dist = 1;
     } else if (arg_match(&arg, &kf_disabled, argi)) {
       config->cfg.kf_mode = VPX_KF_DISABLED;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else if (arg_match(&arg, &test16bitinternalarg, argi)) {
       if (strcmp(global->codec->name, "vp9") == 0 ||
-          strcmp(global->codec->name, "vp10") == 0) {
+          strcmp(global->codec->name, "av1") == 0) {
         test_16bit_internal = 1;
       }
 #endif
@@ -1192,9 +1192,9 @@ static int parse_stream_params(struct VpxEncoderConfig *global,
       if (!match) argj++;
     }
   }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (strcmp(global->codec->name, "vp9") == 0 ||
-      strcmp(global->codec->name, "vp10") == 0) {
+      strcmp(global->codec->name, "av1") == 0) {
     config->use_16bit_internal =
         test_16bit_internal | (config->cfg.g_profile > 1);
   }
@@ -1461,7 +1461,7 @@ static void initialize_encoder(struct stream_state *stream,
 
   flags |= global->show_psnr ? VPX_CODEC_USE_PSNR : 0;
   flags |= global->out_part ? VPX_CODEC_USE_OUTPUT_PARTITION : 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   flags |= stream->config.use_16bit_internal ? VPX_CODEC_USE_HIGHBITDEPTH : 0;
 #endif
 
@@ -1506,7 +1506,7 @@ static void encode_frame(struct stream_state *stream,
       cfg->g_timebase.num / global->framerate.num;
 
 /* Scale if necessary */
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (img) {
     if ((img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) &&
         (img->d_w != cfg->g_w || img->d_h != cfg->g_h)) {
@@ -1721,7 +1721,7 @@ static void test_decode(struct stream_state *stream,
   enc_img = ref_enc.img;
   aom_codec_control(&stream->decoder, VP9_GET_REFERENCE, &ref_dec);
   dec_img = ref_dec.img;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if ((enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) !=
       (dec_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH)) {
     if (enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
@@ -1741,7 +1741,7 @@ static void test_decode(struct stream_state *stream,
 
   if (!compare_img(&enc_img, &dec_img)) {
     int y[4], u[4], v[4];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (enc_img.fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
       find_mismatch_high(&enc_img, &dec_img, y, u, v);
     } else {
@@ -1787,7 +1787,7 @@ static void print_time(const char *label, int64_t etl) {
 int main(int argc, const char **argv_) {
   int pass;
   aom_image_t raw;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   aom_image_t raw_shift;
   int allocated_raw_shift = 0;
   int use_16bit_internal = 0;
@@ -1857,7 +1857,7 @@ int main(int argc, const char **argv_) {
   if (!input.filename) usage_exit();
 
   /* Decide if other chroma subsamplings than 4:2:0 are supported */
-  if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == VP10_FOURCC)
+  if (global.codec->fourcc == VP9_FOURCC || global.codec->fourcc == AV1_FOURCC)
     input.only_i420 = 0;
 
   for (pass = global.pass ? global.pass - 1 : 0; pass < global.passes; pass++) {
@@ -1964,9 +1964,9 @@ int main(int argc, const char **argv_) {
         open_output_file(stream, &global, &input.pixel_aspect_ratio));
     FOREACH_STREAM(initialize_encoder(stream, &global));
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (strcmp(global.codec->name, "vp9") == 0 ||
-        strcmp(global.codec->name, "vp10") == 0) {
+        strcmp(global.codec->name, "av1") == 0) {
       // Check to see if at least one stream uses 16 bit internal.
       // Currently assume that the bit_depths for all streams using
       // highbitdepth are the same.
@@ -2018,7 +2018,7 @@ int main(int argc, const char **argv_) {
         frame_avail = 0;
 
       if (frames_in > global.skip_frames) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         aom_image_t *frame_to_encode;
         if (input_shift || (use_16bit_internal && input.bit_depth == 8)) {
           assert(use_16bit_internal);
@@ -2174,7 +2174,7 @@ int main(int argc, const char **argv_) {
     });
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (allocated_raw_shift) aom_img_free(&raw_shift);
 #endif
   aom_img_free(&raw);
diff --git a/av1/av1_common.mk b/av1/av1_common.mk
index 99cbe46ab69badcc43116b7a4908ebd6ee917d92..020095f5f14bf0404eb9449b99df131b4f23487d 100644
--- a/av1/av1_common.mk
+++ b/av1/av1_common.mk
@@ -8,91 +8,91 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-VP10_COMMON_SRCS-yes += av1_common.mk
-VP10_COMMON_SRCS-yes += av1_iface_common.h
-VP10_COMMON_SRCS-yes += common/alloccommon.c
-VP10_COMMON_SRCS-yes += common/blockd.c
-VP10_COMMON_SRCS-yes += common/debugmodes.c
-VP10_COMMON_SRCS-yes += common/entropy.c
-VP10_COMMON_SRCS-yes += common/entropymode.c
-VP10_COMMON_SRCS-yes += common/entropymv.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.c
-VP10_COMMON_SRCS-yes += common/frame_buffers.h
-VP10_COMMON_SRCS-yes += common/alloccommon.h
-VP10_COMMON_SRCS-yes += common/blockd.h
-VP10_COMMON_SRCS-yes += common/common.h
-VP10_COMMON_SRCS-yes += common/entropy.h
-VP10_COMMON_SRCS-yes += common/entropymode.h
-VP10_COMMON_SRCS-yes += common/entropymv.h
-VP10_COMMON_SRCS-yes += common/enums.h
-VP10_COMMON_SRCS-yes += common/filter.h
-VP10_COMMON_SRCS-yes += common/filter.c
-VP10_COMMON_SRCS-yes += common/idct.h
-VP10_COMMON_SRCS-yes += common/idct.c
-VP10_COMMON_SRCS-yes += common/av1_inv_txfm.h
-VP10_COMMON_SRCS-yes += common/av1_inv_txfm.c
-VP10_COMMON_SRCS-yes += common/loopfilter.h
-VP10_COMMON_SRCS-yes += common/thread_common.h
-VP10_COMMON_SRCS-yes += common/mv.h
-VP10_COMMON_SRCS-yes += common/onyxc_int.h
-VP10_COMMON_SRCS-yes += common/pred_common.h
-VP10_COMMON_SRCS-yes += common/pred_common.c
-VP10_COMMON_SRCS-yes += common/quant_common.h
-VP10_COMMON_SRCS-yes += common/reconinter.h
-VP10_COMMON_SRCS-yes += common/reconintra.h
-VP10_COMMON_SRCS-yes += common/av1_rtcd.c
-VP10_COMMON_SRCS-yes += common/av1_rtcd_defs.pl
-VP10_COMMON_SRCS-yes += common/scale.h
-VP10_COMMON_SRCS-yes += common/scale.c
-VP10_COMMON_SRCS-yes += common/seg_common.h
-VP10_COMMON_SRCS-yes += common/seg_common.c
-VP10_COMMON_SRCS-yes += common/tile_common.h
-VP10_COMMON_SRCS-yes += common/tile_common.c
-VP10_COMMON_SRCS-yes += common/loopfilter.c
-VP10_COMMON_SRCS-yes += common/thread_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.c
-VP10_COMMON_SRCS-yes += common/mvref_common.h
-VP10_COMMON_SRCS-yes += common/quant_common.c
-VP10_COMMON_SRCS-yes += common/reconinter.c
-VP10_COMMON_SRCS-yes += common/reconintra.c
-VP10_COMMON_SRCS-yes += common/common_data.h
-VP10_COMMON_SRCS-yes += common/scan.c
-VP10_COMMON_SRCS-yes += common/scan.h
-VP10_COMMON_SRCS-yes += common/av1_fwd_txfm.h
-VP10_COMMON_SRCS-yes += common/av1_fwd_txfm.c
-VP10_COMMON_SRCS-yes += common/clpf.c
-VP10_COMMON_SRCS-yes += common/clpf.h
+AV1_COMMON_SRCS-yes += av1_common.mk
+AV1_COMMON_SRCS-yes += av1_iface_common.h
+AV1_COMMON_SRCS-yes += common/alloccommon.c
+AV1_COMMON_SRCS-yes += common/blockd.c
+AV1_COMMON_SRCS-yes += common/debugmodes.c
+AV1_COMMON_SRCS-yes += common/entropy.c
+AV1_COMMON_SRCS-yes += common/entropymode.c
+AV1_COMMON_SRCS-yes += common/entropymv.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.c
+AV1_COMMON_SRCS-yes += common/frame_buffers.h
+AV1_COMMON_SRCS-yes += common/alloccommon.h
+AV1_COMMON_SRCS-yes += common/blockd.h
+AV1_COMMON_SRCS-yes += common/common.h
+AV1_COMMON_SRCS-yes += common/entropy.h
+AV1_COMMON_SRCS-yes += common/entropymode.h
+AV1_COMMON_SRCS-yes += common/entropymv.h
+AV1_COMMON_SRCS-yes += common/enums.h
+AV1_COMMON_SRCS-yes += common/filter.h
+AV1_COMMON_SRCS-yes += common/filter.c
+AV1_COMMON_SRCS-yes += common/idct.h
+AV1_COMMON_SRCS-yes += common/idct.c
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_inv_txfm.c
+AV1_COMMON_SRCS-yes += common/loopfilter.h
+AV1_COMMON_SRCS-yes += common/thread_common.h
+AV1_COMMON_SRCS-yes += common/mv.h
+AV1_COMMON_SRCS-yes += common/onyxc_int.h
+AV1_COMMON_SRCS-yes += common/pred_common.h
+AV1_COMMON_SRCS-yes += common/pred_common.c
+AV1_COMMON_SRCS-yes += common/quant_common.h
+AV1_COMMON_SRCS-yes += common/reconinter.h
+AV1_COMMON_SRCS-yes += common/reconintra.h
+AV1_COMMON_SRCS-yes += common/av1_rtcd.c
+AV1_COMMON_SRCS-yes += common/av1_rtcd_defs.pl
+AV1_COMMON_SRCS-yes += common/scale.h
+AV1_COMMON_SRCS-yes += common/scale.c
+AV1_COMMON_SRCS-yes += common/seg_common.h
+AV1_COMMON_SRCS-yes += common/seg_common.c
+AV1_COMMON_SRCS-yes += common/tile_common.h
+AV1_COMMON_SRCS-yes += common/tile_common.c
+AV1_COMMON_SRCS-yes += common/loopfilter.c
+AV1_COMMON_SRCS-yes += common/thread_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.c
+AV1_COMMON_SRCS-yes += common/mvref_common.h
+AV1_COMMON_SRCS-yes += common/quant_common.c
+AV1_COMMON_SRCS-yes += common/reconinter.c
+AV1_COMMON_SRCS-yes += common/reconintra.c
+AV1_COMMON_SRCS-yes += common/common_data.h
+AV1_COMMON_SRCS-yes += common/scan.c
+AV1_COMMON_SRCS-yes += common/scan.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.h
+AV1_COMMON_SRCS-yes += common/av1_fwd_txfm.c
+AV1_COMMON_SRCS-yes += common/clpf.c
+AV1_COMMON_SRCS-yes += common/clpf.h
 ifeq ($(CONFIG_DERING),yes)
-VP10_COMMON_SRCS-yes += common/od_dering.c
-VP10_COMMON_SRCS-yes += common/od_dering.h
-VP10_COMMON_SRCS-yes += common/dering.c
-VP10_COMMON_SRCS-yes += common/dering.h
+AV1_COMMON_SRCS-yes += common/od_dering.c
+AV1_COMMON_SRCS-yes += common/od_dering.h
+AV1_COMMON_SRCS-yes += common/dering.c
+AV1_COMMON_SRCS-yes += common/dering.h
 endif
-VP10_COMMON_SRCS-yes += common/odintrin.c
-VP10_COMMON_SRCS-yes += common/odintrin.h
+AV1_COMMON_SRCS-yes += common/odintrin.c
+AV1_COMMON_SRCS-yes += common/odintrin.h
 
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans4_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans8_dspr2.c
-VP10_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans16_dspr2.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans4_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans8_dspr2.c
+AV1_COMMON_SRCS-$(HAVE_DSPR2)  += common/mips/dspr2/itrans16_dspr2.c
 endif
 
 # common (msa)
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
-VP10_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct4x4_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct8x8_msa.c
+AV1_COMMON_SRCS-$(HAVE_MSA) += common/mips/msa/idct16x16_msa.c
 
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/idct_intrin_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_dct32x32_impl_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_fwd_txfm_impl_sse2.h
 
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
-VP10_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht4x4_add_neon.c
+AV1_COMMON_SRCS-$(HAVE_NEON) += common/arm/neon/iht8x8_add_neon.c
 endif
 
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
-VP10_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.c
+AV1_COMMON_SRCS-$(HAVE_SSE2) += common/x86/av1_inv_txfm_sse2.h
 
 $(eval $(call rtcd_h_template,av1_rtcd,av1/common/av1_rtcd_defs.pl))
diff --git a/av1/av1_cx.mk b/av1/av1_cx.mk
index 22448ce7f95894783ead3aef9a4066e0a6e41ca0..065bef433ac1a1976b8f720fb5c190e8fe7803a0 100644
--- a/av1/av1_cx.mk
+++ b/av1/av1_cx.mk
@@ -8,112 +8,112 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-VP10_CX_EXPORTS += exports_enc
+AV1_CX_EXPORTS += exports_enc
 
-VP10_CX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_CX_SRCS-no  += $(VP10_COMMON_SRCS-no)
-VP10_CX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_CX_SRCS_REMOVE-no  += $(VP10_COMMON_SRCS_REMOVE-no)
+AV1_CX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_CX_SRCS-no  += $(AV1_COMMON_SRCS-no)
+AV1_CX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_CX_SRCS_REMOVE-no  += $(AV1_COMMON_SRCS_REMOVE-no)
 
-VP10_CX_SRCS-yes += av1_cx_iface.c
+AV1_CX_SRCS-yes += av1_cx_iface.c
 
-VP10_CX_SRCS-yes += encoder/bitstream.c
-VP10_CX_SRCS-yes += encoder/context_tree.c
-VP10_CX_SRCS-yes += encoder/context_tree.h
-VP10_CX_SRCS-yes += encoder/cost.h
-VP10_CX_SRCS-yes += encoder/cost.c
-VP10_CX_SRCS-yes += encoder/dct.c
-VP10_CX_SRCS-yes += encoder/encodeframe.c
-VP10_CX_SRCS-yes += encoder/encodeframe.h
-VP10_CX_SRCS-yes += encoder/encodemb.c
-VP10_CX_SRCS-yes += encoder/encodemv.c
-VP10_CX_SRCS-yes += encoder/ethread.h
-VP10_CX_SRCS-yes += encoder/ethread.c
-VP10_CX_SRCS-yes += encoder/extend.c
-VP10_CX_SRCS-yes += encoder/firstpass.c
-VP10_CX_SRCS-yes += encoder/block.h
-VP10_CX_SRCS-yes += encoder/bitstream.h
-VP10_CX_SRCS-yes += encoder/encodemb.h
-VP10_CX_SRCS-yes += encoder/encodemv.h
-VP10_CX_SRCS-yes += encoder/extend.h
-VP10_CX_SRCS-yes += encoder/firstpass.h
-VP10_CX_SRCS-yes += encoder/lookahead.c
-VP10_CX_SRCS-yes += encoder/lookahead.h
-VP10_CX_SRCS-yes += encoder/mcomp.h
-VP10_CX_SRCS-yes += encoder/encoder.h
-VP10_CX_SRCS-yes += encoder/quantize.h
-VP10_CX_SRCS-yes += encoder/ratectrl.h
-VP10_CX_SRCS-yes += encoder/rd.h
-VP10_CX_SRCS-yes += encoder/rdopt.h
-VP10_CX_SRCS-yes += encoder/tokenize.h
-VP10_CX_SRCS-yes += encoder/treewriter.h
-VP10_CX_SRCS-yes += encoder/mcomp.c
-VP10_CX_SRCS-yes += encoder/encoder.c
-VP10_CX_SRCS-yes += encoder/picklpf.c
-VP10_CX_SRCS-yes += encoder/picklpf.h
-VP10_CX_SRCS-yes += encoder/quantize.c
-VP10_CX_SRCS-yes += encoder/ratectrl.c
-VP10_CX_SRCS-yes += encoder/rd.c
-VP10_CX_SRCS-yes += encoder/rdopt.c
-VP10_CX_SRCS-yes += encoder/segmentation.c
-VP10_CX_SRCS-yes += encoder/segmentation.h
-VP10_CX_SRCS-yes += encoder/speed_features.c
-VP10_CX_SRCS-yes += encoder/speed_features.h
-VP10_CX_SRCS-yes += encoder/subexp.c
-VP10_CX_SRCS-yes += encoder/subexp.h
-VP10_CX_SRCS-yes += encoder/resize.c
-VP10_CX_SRCS-yes += encoder/resize.h
-VP10_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
+AV1_CX_SRCS-yes += encoder/bitstream.c
+AV1_CX_SRCS-yes += encoder/context_tree.c
+AV1_CX_SRCS-yes += encoder/context_tree.h
+AV1_CX_SRCS-yes += encoder/cost.h
+AV1_CX_SRCS-yes += encoder/cost.c
+AV1_CX_SRCS-yes += encoder/dct.c
+AV1_CX_SRCS-yes += encoder/encodeframe.c
+AV1_CX_SRCS-yes += encoder/encodeframe.h
+AV1_CX_SRCS-yes += encoder/encodemb.c
+AV1_CX_SRCS-yes += encoder/encodemv.c
+AV1_CX_SRCS-yes += encoder/ethread.h
+AV1_CX_SRCS-yes += encoder/ethread.c
+AV1_CX_SRCS-yes += encoder/extend.c
+AV1_CX_SRCS-yes += encoder/firstpass.c
+AV1_CX_SRCS-yes += encoder/block.h
+AV1_CX_SRCS-yes += encoder/bitstream.h
+AV1_CX_SRCS-yes += encoder/encodemb.h
+AV1_CX_SRCS-yes += encoder/encodemv.h
+AV1_CX_SRCS-yes += encoder/extend.h
+AV1_CX_SRCS-yes += encoder/firstpass.h
+AV1_CX_SRCS-yes += encoder/lookahead.c
+AV1_CX_SRCS-yes += encoder/lookahead.h
+AV1_CX_SRCS-yes += encoder/mcomp.h
+AV1_CX_SRCS-yes += encoder/encoder.h
+AV1_CX_SRCS-yes += encoder/quantize.h
+AV1_CX_SRCS-yes += encoder/ratectrl.h
+AV1_CX_SRCS-yes += encoder/rd.h
+AV1_CX_SRCS-yes += encoder/rdopt.h
+AV1_CX_SRCS-yes += encoder/tokenize.h
+AV1_CX_SRCS-yes += encoder/treewriter.h
+AV1_CX_SRCS-yes += encoder/mcomp.c
+AV1_CX_SRCS-yes += encoder/encoder.c
+AV1_CX_SRCS-yes += encoder/picklpf.c
+AV1_CX_SRCS-yes += encoder/picklpf.h
+AV1_CX_SRCS-yes += encoder/quantize.c
+AV1_CX_SRCS-yes += encoder/ratectrl.c
+AV1_CX_SRCS-yes += encoder/rd.c
+AV1_CX_SRCS-yes += encoder/rdopt.c
+AV1_CX_SRCS-yes += encoder/segmentation.c
+AV1_CX_SRCS-yes += encoder/segmentation.h
+AV1_CX_SRCS-yes += encoder/speed_features.c
+AV1_CX_SRCS-yes += encoder/speed_features.h
+AV1_CX_SRCS-yes += encoder/subexp.c
+AV1_CX_SRCS-yes += encoder/subexp.h
+AV1_CX_SRCS-yes += encoder/resize.c
+AV1_CX_SRCS-yes += encoder/resize.h
+AV1_CX_SRCS-$(CONFIG_INTERNAL_STATS) += encoder/blockiness.c
 
-VP10_CX_SRCS-yes += encoder/tokenize.c
-VP10_CX_SRCS-yes += encoder/treewriter.c
-VP10_CX_SRCS-yes += encoder/aq_variance.c
-VP10_CX_SRCS-yes += encoder/aq_variance.h
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
-VP10_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
-VP10_CX_SRCS-yes += encoder/aq_complexity.c
-VP10_CX_SRCS-yes += encoder/aq_complexity.h
-VP10_CX_SRCS-yes += encoder/skin_detection.c
-VP10_CX_SRCS-yes += encoder/skin_detection.h
-VP10_CX_SRCS-yes += encoder/temporal_filter.c
-VP10_CX_SRCS-yes += encoder/temporal_filter.h
-VP10_CX_SRCS-yes += encoder/mbgraph.c
-VP10_CX_SRCS-yes += encoder/mbgraph.h
-VP10_CX_SRCS-yes += encoder/pickdering.c
+AV1_CX_SRCS-yes += encoder/tokenize.c
+AV1_CX_SRCS-yes += encoder/treewriter.c
+AV1_CX_SRCS-yes += encoder/aq_variance.c
+AV1_CX_SRCS-yes += encoder/aq_variance.h
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.c
+AV1_CX_SRCS-yes += encoder/aq_cyclicrefresh.h
+AV1_CX_SRCS-yes += encoder/aq_complexity.c
+AV1_CX_SRCS-yes += encoder/aq_complexity.h
+AV1_CX_SRCS-yes += encoder/skin_detection.c
+AV1_CX_SRCS-yes += encoder/skin_detection.h
+AV1_CX_SRCS-yes += encoder/temporal_filter.c
+AV1_CX_SRCS-yes += encoder/temporal_filter.h
+AV1_CX_SRCS-yes += encoder/mbgraph.c
+AV1_CX_SRCS-yes += encoder/mbgraph.h
+AV1_CX_SRCS-yes += encoder/pickdering.c
 
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
-ifeq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/temporal_filter_apply_sse2.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/quantize_sse2.c
+ifeq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/highbd_block_error_intrin_sse2.c
 endif
 
 ifeq ($(CONFIG_USE_X86INC),yes)
-VP10_CX_SRCS-$(HAVE_MMX) += encoder/x86/dct_mmx.asm
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
+AV1_CX_SRCS-$(HAVE_MMX) += encoder/x86/dct_mmx.asm
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/error_sse2.asm
 endif
 
 ifeq ($(ARCH_X86_64),yes)
 ifeq ($(CONFIG_USE_X86INC),yes)
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/quantize_ssse3_x86_64.asm
 endif
 endif
 
-VP10_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.c
-VP10_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
+AV1_CX_SRCS-$(HAVE_SSE2) += encoder/x86/dct_sse2.c
+AV1_CX_SRCS-$(HAVE_SSSE3) += encoder/x86/dct_ssse3.c
 
-VP10_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
+AV1_CX_SRCS-$(HAVE_AVX2) += encoder/x86/error_intrin_avx2.c
 
-ifneq ($(CONFIG_VPX_HIGHBITDEPTH),yes)
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
+ifneq ($(CONFIG_AOM_HIGHBITDEPTH),yes)
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/dct_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/error_neon.c
 endif
-VP10_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
+AV1_CX_SRCS-$(HAVE_NEON) += encoder/arm/neon/quantize_neon.c
 
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
-VP10_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/error_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct4x4_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct8x8_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct16x16_msa.c
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/fdct_msa.h
+AV1_CX_SRCS-$(HAVE_MSA) += encoder/mips/msa/temporal_filter_msa.c
 
-VP10_CX_SRCS-yes := $(filter-out $(VP10_CX_SRCS_REMOVE-yes),$(VP10_CX_SRCS-yes))
+AV1_CX_SRCS-yes := $(filter-out $(AV1_CX_SRCS_REMOVE-yes),$(AV1_CX_SRCS-yes))
diff --git a/av1/av1_cx_iface.c b/av1/av1_cx_iface.c
index 162ee065f31001d6952e01c31e507ceca07a5375..921523750658e44793c8482e14991c3c13437420 100644
--- a/av1/av1_cx_iface.c
+++ b/av1/av1_cx_iface.c
@@ -22,7 +22,7 @@
 #include "av1/encoder/firstpass.h"
 #include "av1/av1_iface_common.h"
 
-struct vp10_extracfg {
+struct av1_extracfg {
   int cpu_used;  // available cpu percentage in 1/16
   unsigned int enable_auto_alt_ref;
   unsigned int noise_sensitivity;
@@ -56,7 +56,7 @@ struct vp10_extracfg {
   int render_height;
 };
 
-static struct vp10_extracfg default_extra_cfg = {
+static struct av1_extracfg default_extra_cfg = {
   0,              // cpu_used
   1,              // enable_auto_alt_ref
   0,              // noise_sensitivity
@@ -93,9 +93,9 @@ static struct vp10_extracfg default_extra_cfg = {
 struct aom_codec_alg_priv {
   aom_codec_priv_t base;
   aom_codec_enc_cfg_t cfg;
-  struct vp10_extracfg extra_cfg;
-  VP10EncoderConfig oxcf;
-  VP10_COMP *cpi;
+  struct av1_extracfg extra_cfg;
+  AV1EncoderConfig oxcf;
+  AV1_COMP *cpi;
   unsigned char *cx_data;
   size_t cx_data_sz;
   unsigned char *pending_cx_data;
@@ -115,7 +115,7 @@ struct aom_codec_alg_priv {
   BufferPool *buffer_pool;
 };
 
-static VPX_REFFRAME ref_frame_to_vp10_reframe(aom_ref_frame_type_t frame) {
+static VPX_REFFRAME ref_frame_to_av1_reframe(aom_ref_frame_type_t frame) {
   switch (frame) {
     case VP8_LAST_FRAME: return VPX_LAST_FLAG;
     case VP8_GOLD_FRAME: return VPX_GOLD_FLAG;
@@ -165,7 +165,7 @@ static aom_codec_err_t update_error_state(
 
 static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
                                        const aom_codec_enc_cfg_t *cfg,
-                                       const struct vp10_extracfg *extra_cfg) {
+                                       const struct av1_extracfg *extra_cfg) {
   RANGE_CHECK(cfg, g_w, 1, 65535);  // 16 bits available
   RANGE_CHECK(cfg, g_h, 1, 65535);  // 16 bits available
   RANGE_CHECK(cfg, g_timebase.den, 1, 1000000000);
@@ -204,11 +204,11 @@ static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
     RANGE_CHECK(cfg, rc_scaled_height, 0, cfg->g_h);
   }
 
-  // Spatial/temporal scalability are not yet supported in VP10.
+  // Spatial/temporal scalability are not yet supported in AV1.
   // Only accept the default value for range checking.
   RANGE_CHECK(cfg, ss_number_layers, 1, 1);
   RANGE_CHECK(cfg, ts_number_layers, 1, 1);
-  // VP10 does not support a lower bound on the keyframe interval in
+  // AV1 does not support a lower bound on the keyframe interval in
   // automatic keyframe placement mode.
   if (cfg->kf_mode != VPX_KF_DISABLED && cfg->kf_min_dist != cfg->kf_max_dist &&
       cfg->kf_min_dist > 0)
@@ -229,9 +229,9 @@ static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
   RANGE_CHECK(cfg, g_input_bit_depth, 8, 12);
   RANGE_CHECK(extra_cfg, content, VPX_CONTENT_DEFAULT, VPX_CONTENT_INVALID - 1);
 
-  // TODO(yaowu): remove this when ssim tuning is implemented for vp10
+  // TODO(yaowu): remove this when ssim tuning is implemented for av1
   if (extra_cfg->tuning == VPX_TUNE_SSIM)
-    ERROR("Option --tune=ssim is not currently supported in VP10.");
+    ERROR("Option --tune=ssim is not currently supported in AV1.");
 
   if (cfg->g_pass == VPX_RC_LAST_PASS) {
     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
@@ -254,7 +254,7 @@ static aom_codec_err_t validate_config(aom_codec_alg_priv_t *ctx,
       ERROR("rc_twopass_stats_in missing EOS stats packet");
   }
 
-#if !CONFIG_VPX_HIGHBITDEPTH
+#if !CONFIG_AOM_HIGHBITDEPTH
   if (cfg->g_profile > (unsigned int)PROFILE_1) {
     ERROR("Profile > 1 not supported in this build configuration");
   }
@@ -330,8 +330,8 @@ static int get_image_bps(const aom_image_t *img) {
 }
 
 static aom_codec_err_t set_encoder_config(
-    VP10EncoderConfig *oxcf, const aom_codec_enc_cfg_t *cfg,
-    const struct vp10_extracfg *extra_cfg) {
+    AV1EncoderConfig *oxcf, const aom_codec_enc_cfg_t *cfg,
+    const struct av1_extracfg *extra_cfg) {
   const int is_vbr = cfg->rc_end_usage == VPX_VBR;
   oxcf->profile = cfg->g_profile;
   oxcf->max_threads = (int)cfg->g_threads;
@@ -362,10 +362,10 @@ static aom_codec_err_t set_encoder_config(
   oxcf->gf_cbr_boost_pct = extra_cfg->gf_cbr_boost_pct;
 
   oxcf->best_allowed_q =
-      extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_min_quantizer);
+      extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_min_quantizer);
   oxcf->worst_allowed_q =
-      extra_cfg->lossless ? 0 : vp10_quantizer_to_qindex(cfg->rc_max_quantizer);
-  oxcf->cq_level = vp10_quantizer_to_qindex(extra_cfg->cq_level);
+      extra_cfg->lossless ? 0 : av1_quantizer_to_qindex(cfg->rc_max_quantizer);
+  oxcf->cq_level = av1_quantizer_to_qindex(extra_cfg->cq_level);
   oxcf->fixed_q = -1;
 
 #if CONFIG_AOM_QM
@@ -438,7 +438,7 @@ static aom_codec_err_t set_encoder_config(
   oxcf->frame_periodic_boost = extra_cfg->frame_periodic_boost;
 
   /*
-  printf("Current VP10 Settings: \n");
+  printf("Current AV1 Settings: \n");
   printf("target_bandwidth: %d\n", oxcf->target_bandwidth);
   printf("noise_sensitivity: %d\n", oxcf->noise_sensitivity);
   printf("sharpness: %d\n",    oxcf->sharpness);
@@ -500,7 +500,7 @@ static aom_codec_err_t encoder_set_config(aom_codec_alg_priv_t *ctx,
     set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
     // On profile change, request a key frame
     force_key |= ctx->cpi->common.profile != ctx->oxcf.profile;
-    vp10_change_config(ctx->cpi, &ctx->oxcf);
+    av1_change_config(ctx->cpi, &ctx->oxcf);
   }
 
   if (force_key) ctx->next_frame_flags |= VPX_EFLAG_FORCE_KF;
@@ -512,7 +512,7 @@ static aom_codec_err_t ctrl_get_quantizer(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
   int *const arg = va_arg(args, int *);
   if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
-  *arg = vp10_get_quantizer(ctx->cpi);
+  *arg = av1_get_quantizer(ctx->cpi);
   return VPX_CODEC_OK;
 }
 
@@ -520,80 +520,80 @@ static aom_codec_err_t ctrl_get_quantizer64(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
   int *const arg = va_arg(args, int *);
   if (arg == NULL) return VPX_CODEC_INVALID_PARAM;
-  *arg = vp10_qindex_to_quantizer(vp10_get_quantizer(ctx->cpi));
+  *arg = av1_qindex_to_quantizer(av1_get_quantizer(ctx->cpi));
   return VPX_CODEC_OK;
 }
 
 static aom_codec_err_t update_extra_cfg(aom_codec_alg_priv_t *ctx,
-                                        const struct vp10_extracfg *extra_cfg) {
+                                        const struct av1_extracfg *extra_cfg) {
   const aom_codec_err_t res = validate_config(ctx, &ctx->cfg, extra_cfg);
   if (res == VPX_CODEC_OK) {
     ctx->extra_cfg = *extra_cfg;
     set_encoder_config(&ctx->oxcf, &ctx->cfg, &ctx->extra_cfg);
-    vp10_change_config(ctx->cpi, &ctx->oxcf);
+    av1_change_config(ctx->cpi, &ctx->oxcf);
   }
   return res;
 }
 
 static aom_codec_err_t ctrl_set_cpuused(aom_codec_alg_priv_t *ctx,
                                         va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.cpu_used = CAST(VP8E_SET_CPUUSED, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_enable_auto_alt_ref(aom_codec_alg_priv_t *ctx,
                                                     va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.enable_auto_alt_ref = CAST(VP8E_SET_ENABLEAUTOALTREF, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_noise_sensitivity(aom_codec_alg_priv_t *ctx,
                                                   va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.noise_sensitivity = CAST(VP9E_SET_NOISE_SENSITIVITY, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_sharpness(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.sharpness = CAST(VP8E_SET_SHARPNESS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_static_thresh(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.static_thresh = CAST(VP8E_SET_STATIC_THRESHOLD, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_tile_columns(aom_codec_alg_priv_t *ctx,
                                              va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.tile_columns = CAST(VP9E_SET_TILE_COLUMNS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_tile_rows(aom_codec_alg_priv_t *ctx,
                                           va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.tile_rows = CAST(VP9E_SET_TILE_ROWS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_arnr_max_frames(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.arnr_max_frames = CAST(VP8E_SET_ARNR_MAXFRAMES, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_arnr_strength(aom_codec_alg_priv_t *ctx,
                                               va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.arnr_strength = CAST(VP8E_SET_ARNR_STRENGTH, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
@@ -607,21 +607,21 @@ static aom_codec_err_t ctrl_set_arnr_type(aom_codec_alg_priv_t *ctx,
 
 static aom_codec_err_t ctrl_set_tuning(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.tuning = CAST(VP8E_SET_TUNING, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_cq_level(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.cq_level = CAST(VP8E_SET_CQ_LEVEL, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
     aom_codec_alg_priv_t *ctx, va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.rc_max_intra_bitrate_pct =
       CAST(VP8E_SET_MAX_INTRA_BITRATE_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
@@ -629,7 +629,7 @@ static aom_codec_err_t ctrl_set_rc_max_intra_bitrate_pct(
 
 static aom_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
     aom_codec_alg_priv_t *ctx, va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.rc_max_inter_bitrate_pct =
       CAST(VP8E_SET_MAX_INTER_BITRATE_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
@@ -637,14 +637,14 @@ static aom_codec_err_t ctrl_set_rc_max_inter_bitrate_pct(
 
 static aom_codec_err_t ctrl_set_rc_gf_cbr_boost_pct(aom_codec_alg_priv_t *ctx,
                                                     va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.gf_cbr_boost_pct = CAST(VP9E_SET_GF_CBR_BOOST_PCT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_lossless(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.lossless = CAST(VP9E_SET_LOSSLESS, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
@@ -652,21 +652,21 @@ static aom_codec_err_t ctrl_set_lossless(aom_codec_alg_priv_t *ctx,
 #if CONFIG_AOM_QM
 static aom_codec_err_t ctrl_set_enable_qm(aom_codec_alg_priv_t *ctx,
                                          va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.enable_qm = CAST(VP9E_SET_ENABLE_QM, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_qm_min(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.qm_min = CAST(VP9E_SET_QM_MIN, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_qm_max(aom_codec_alg_priv_t *ctx,
                                        va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.qm_max = CAST(VP9E_SET_QM_MAX, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
@@ -674,7 +674,7 @@ static aom_codec_err_t ctrl_set_qm_max(aom_codec_alg_priv_t *ctx,
 
 static aom_codec_err_t ctrl_set_frame_parallel_decoding_mode(
     aom_codec_alg_priv_t *ctx, va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.frame_parallel_decoding_mode =
       CAST(VP9E_SET_FRAME_PARALLEL_DECODING, args);
   return update_extra_cfg(ctx, &extra_cfg);
@@ -682,28 +682,28 @@ static aom_codec_err_t ctrl_set_frame_parallel_decoding_mode(
 
 static aom_codec_err_t ctrl_set_aq_mode(aom_codec_alg_priv_t *ctx,
                                         va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.aq_mode = CAST(VP9E_SET_AQ_MODE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_min_gf_interval(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.min_gf_interval = CAST(VP9E_SET_MIN_GF_INTERVAL, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_max_gf_interval(aom_codec_alg_priv_t *ctx,
                                                 va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.max_gf_interval = CAST(VP9E_SET_MAX_GF_INTERVAL, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_frame_periodic_boost(aom_codec_alg_priv_t *ctx,
                                                      va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.frame_periodic_boost = CAST(VP9E_SET_FRAME_PERIODIC_BOOST, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
@@ -736,17 +736,17 @@ static aom_codec_err_t encoder_init(aom_codec_ctx_t *ctx,
     }
 
     priv->extra_cfg = default_extra_cfg;
-    once(vp10_initialize_enc);
+    once(av1_initialize_enc);
 
     res = validate_config(priv, &priv->cfg, &priv->extra_cfg);
 
     if (res == VPX_CODEC_OK) {
       set_encoder_config(&priv->oxcf, &priv->cfg, &priv->extra_cfg);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       priv->oxcf.use_highbitdepth =
           (ctx->init_flags & VPX_CODEC_USE_HIGHBITDEPTH) ? 1 : 0;
 #endif
-      priv->cpi = vp10_create_compressor(&priv->oxcf, priv->buffer_pool);
+      priv->cpi = av1_create_compressor(&priv->oxcf, priv->buffer_pool);
       if (priv->cpi == NULL)
         res = VPX_CODEC_MEM_ERROR;
       else
@@ -759,7 +759,7 @@ static aom_codec_err_t encoder_init(aom_codec_ctx_t *ctx,
 
 static aom_codec_err_t encoder_destroy(aom_codec_alg_priv_t *ctx) {
   free(ctx->cx_data);
-  vp10_remove_compressor(ctx->cpi);
+  av1_remove_compressor(ctx->cpi);
 #if CONFIG_MULTITHREAD
   pthread_mutex_destroy(&ctx->buffer_pool->pool_mutex);
 #endif
@@ -796,7 +796,7 @@ static void pick_quickcompress_mode(aom_codec_alg_priv_t *ctx,
 
   if (ctx->oxcf.mode != new_mode) {
     ctx->oxcf.mode = new_mode;
-    vp10_change_config(ctx->cpi, &ctx->oxcf);
+    av1_change_config(ctx->cpi, &ctx->oxcf);
   }
 }
 
@@ -875,7 +875,7 @@ static int write_superframe_index(aom_codec_alg_priv_t *ctx) {
   return index_sz;
 }
 
-// vp10 uses 10,000,000 ticks/second as time stamp
+// av1 uses 10,000,000 ticks/second as time stamp
 #define TICKS_PER_SEC 10000000LL
 
 static int64_t timebase_units_to_ticks(const aom_rational_t *timebase,
@@ -889,7 +889,7 @@ static int64_t ticks_to_timebase_units(const aom_rational_t *timebase,
   return (n * timebase->den + round) / timebase->num / TICKS_PER_SEC;
 }
 
-static aom_codec_frame_flags_t get_frame_pkt_flags(const VP10_COMP *cpi,
+static aom_codec_frame_flags_t get_frame_pkt_flags(const AV1_COMP *cpi,
                                                    unsigned int lib_flags) {
   aom_codec_frame_flags_t flags = lib_flags << 16;
 
@@ -907,7 +907,7 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
                                       aom_enc_frame_flags_t flags,
                                       unsigned long deadline) {
   aom_codec_err_t res = VPX_CODEC_OK;
-  VP10_COMP *const cpi = ctx->cpi;
+  AV1_COMP *const cpi = ctx->cpi;
   const aom_rational_t *const timebase = &ctx->cfg.g_timebase;
   size_t data_sz;
 
@@ -942,7 +942,7 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
     return VPX_CODEC_INVALID_PARAM;
   }
 
-  vp10_apply_encoding_flags(cpi, flags);
+  av1_apply_encoding_flags(cpi, flags);
 
   // Handle fixed keyframe intervals
   if (ctx->cfg.kf_mode == VPX_KF_AUTO &&
@@ -971,7 +971,7 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
 
       // Store the original flags in to the frame buffer. Will extract the
       // key frame flag when we actually encode this frame.
-      if (vp10_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
+      if (av1_receive_raw_frame(cpi, flags | ctx->next_frame_flags, &sd,
                                  dst_time_stamp, dst_end_time_stamp)) {
         res = update_error_state(ctx, &cpi->common.error);
       }
@@ -998,7 +998,7 @@ static aom_codec_err_t encoder_encode(aom_codec_alg_priv_t *ctx,
     }
 
     while (cx_data_sz >= ctx->cx_data_sz / 2 &&
-           -1 != vp10_get_compressed_data(cpi, &lib_flags, &size, cx_data,
+           -1 != av1_get_compressed_data(cpi, &lib_flags, &size, cx_data,
                                           &dst_time_stamp, &dst_end_time_stamp,
                                           !img)) {
       if (size) {
@@ -1094,8 +1094,8 @@ static aom_codec_err_t ctrl_set_reference(aom_codec_alg_priv_t *ctx,
     YV12_BUFFER_CONFIG sd;
 
     image2yuvconfig(&frame->img, &sd);
-    vp10_set_reference_enc(ctx->cpi,
-                           ref_frame_to_vp10_reframe(frame->frame_type), &sd);
+    av1_set_reference_enc(ctx->cpi,
+                           ref_frame_to_av1_reframe(frame->frame_type), &sd);
     return VPX_CODEC_OK;
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -1110,8 +1110,8 @@ static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
     YV12_BUFFER_CONFIG sd;
 
     image2yuvconfig(&frame->img, &sd);
-    vp10_copy_reference_enc(ctx->cpi,
-                            ref_frame_to_vp10_reframe(frame->frame_type), &sd);
+    av1_copy_reference_enc(ctx->cpi,
+                            ref_frame_to_av1_reframe(frame->frame_type), &sd);
     return VPX_CODEC_OK;
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -1143,7 +1143,7 @@ static aom_codec_err_t ctrl_set_previewpp(aom_codec_alg_priv_t *ctx,
 static aom_image_t *encoder_get_preview(aom_codec_alg_priv_t *ctx) {
   YV12_BUFFER_CONFIG sd;
 
-  if (vp10_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
+  if (av1_get_preview_raw_frame(ctx->cpi, &sd) == 0) {
     yuvconfig2image(&ctx->preview_img, &sd, NULL);
     return &ctx->preview_img;
   } else {
@@ -1156,7 +1156,7 @@ static aom_codec_err_t ctrl_set_roi_map(aom_codec_alg_priv_t *ctx,
   (void)ctx;
   (void)args;
 
-  // TODO(yaowu): Need to re-implement and test for VP10.
+  // TODO(yaowu): Need to re-implement and test for AV1.
   return VPX_CODEC_INVALID_PARAM;
 }
 
@@ -1165,7 +1165,7 @@ static aom_codec_err_t ctrl_set_active_map(aom_codec_alg_priv_t *ctx,
   aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
 
   if (map) {
-    if (!vp10_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
+    if (!av1_set_active_map(ctx->cpi, map->active_map, (int)map->rows,
                              (int)map->cols))
       return VPX_CODEC_OK;
     else
@@ -1180,7 +1180,7 @@ static aom_codec_err_t ctrl_get_active_map(aom_codec_alg_priv_t *ctx,
   aom_active_map_t *const map = va_arg(args, aom_active_map_t *);
 
   if (map) {
-    if (!vp10_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
+    if (!av1_get_active_map(ctx->cpi, map->active_map, (int)map->rows,
                              (int)map->cols))
       return VPX_CODEC_OK;
     else
@@ -1196,7 +1196,7 @@ static aom_codec_err_t ctrl_set_scale_mode(aom_codec_alg_priv_t *ctx,
 
   if (mode) {
     const int res =
-        vp10_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
+        av1_set_internal_size(ctx->cpi, (VPX_SCALING)mode->h_scaling_mode,
                                (VPX_SCALING)mode->v_scaling_mode);
     return (res == 0) ? VPX_CODEC_OK : VPX_CODEC_INVALID_PARAM;
   } else {
@@ -1216,28 +1216,28 @@ static aom_codec_err_t ctrl_register_cx_callback(aom_codec_alg_priv_t *ctx,
 
 static aom_codec_err_t ctrl_set_tune_content(aom_codec_alg_priv_t *ctx,
                                              va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.content = CAST(VP9E_SET_TUNE_CONTENT, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_color_space(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.color_space = CAST(VP9E_SET_COLOR_SPACE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_color_range(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   extra_cfg.color_range = CAST(VP9E_SET_COLOR_RANGE, args);
   return update_extra_cfg(ctx, &extra_cfg);
 }
 
 static aom_codec_err_t ctrl_set_render_size(aom_codec_alg_priv_t *ctx,
                                             va_list args) {
-  struct vp10_extracfg extra_cfg = ctx->extra_cfg;
+  struct av1_extracfg extra_cfg = ctx->extra_cfg;
   int *const render_size = va_arg(args, int *);
   extra_cfg.render_width = render_size[0];
   extra_cfg.render_height = render_size[1];
@@ -1345,7 +1345,7 @@ static aom_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
         9999,         // kf_max_dist
 
         // TODO(yunqingwang): Spatial/temporal scalability are not supported
-        // in VP10. The following 10 parameters are not used, which should
+        // in AV1. The following 10 parameters are not used, which should
         // be removed later.
         1,  // ss_number_layers
         { 0 },
@@ -1363,10 +1363,10 @@ static aom_codec_enc_cfg_map_t encoder_usage_cfg_map[] = {
 #ifndef VERSION_STRING
 #define VERSION_STRING
 #endif
-CODEC_INTERFACE(aom_codec_vp10_cx) = {
-  "WebM Project VP10 Encoder" VERSION_STRING,
+CODEC_INTERFACE(aom_codec_av1_cx) = {
+  "WebM Project AV1 Encoder" VERSION_STRING,
   VPX_CODEC_INTERNAL_ABI_VERSION,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   VPX_CODEC_CAP_HIGHBITDEPTH |
 #endif
       VPX_CODEC_CAP_ENCODER | VPX_CODEC_CAP_PSNR,  // aom_codec_caps_t
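
A minimal, self-contained sketch (not part of the patch) of the timestamp arithmetic that encoder_encode() performs around TICKS_PER_SEC above, assuming g_timebase expresses num/den seconds per timestamp unit. rational_t stands in for aom_rational_t, and the rounding constant in ticks_to_units() is assumed from the usual libvpx-style implementation rather than taken from this hunk.

#include <stdint.h>
#include <stdio.h>

#define TICKS_PER_SEC 10000000LL /* 100 ns ticks, matching the encoder above */

typedef struct { int num, den; } rational_t; /* stand-in for aom_rational_t */

static int64_t units_to_ticks(const rational_t *tb, int64_t n) {
  return n * TICKS_PER_SEC * tb->num / tb->den;
}

static int64_t ticks_to_units(const rational_t *tb, int64_t n) {
  const int64_t round = TICKS_PER_SEC * tb->num / 2 - 1; /* assumed rounding */
  return (n * tb->den + round) / tb->num / TICKS_PER_SEC;
}

int main(void) {
  const rational_t tb = { 1, 30 };  /* a 1/30 s timebase (30 fps) */
  const int64_t pts = 90;           /* frame 90 -> 3 s */
  const int64_t ticks = units_to_ticks(&tb, pts);
  printf("%lld ticks, back to %lld timebase units\n", (long long)ticks,
         (long long)ticks_to_units(&tb, ticks));
  return 0;
}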
diff --git a/av1/av1_dx.mk b/av1/av1_dx.mk
index f3d455e900e188c888f701192f8194ae0ed0bcb7..d1628dedfc38cf5a4ca3c29f7f7b8e3aa34db000 100644
--- a/av1/av1_dx.mk
+++ b/av1/av1_dx.mk
@@ -8,26 +8,26 @@
 ##  be found in the AUTHORS file in the root of the source tree.
 ##
 
-VP10_DX_EXPORTS += exports_dec
+AV1_DX_EXPORTS += exports_dec
 
-VP10_DX_SRCS-yes += $(VP10_COMMON_SRCS-yes)
-VP10_DX_SRCS-no  += $(VP10_COMMON_SRCS-no)
-VP10_DX_SRCS_REMOVE-yes += $(VP10_COMMON_SRCS_REMOVE-yes)
-VP10_DX_SRCS_REMOVE-no  += $(VP10_COMMON_SRCS_REMOVE-no)
+AV1_DX_SRCS-yes += $(AV1_COMMON_SRCS-yes)
+AV1_DX_SRCS-no  += $(AV1_COMMON_SRCS-no)
+AV1_DX_SRCS_REMOVE-yes += $(AV1_COMMON_SRCS_REMOVE-yes)
+AV1_DX_SRCS_REMOVE-no  += $(AV1_COMMON_SRCS_REMOVE-no)
 
-VP10_DX_SRCS-yes += av1_dx_iface.c
+AV1_DX_SRCS-yes += av1_dx_iface.c
 
-VP10_DX_SRCS-yes += decoder/decodemv.c
-VP10_DX_SRCS-yes += decoder/decodeframe.c
-VP10_DX_SRCS-yes += decoder/decodeframe.h
-VP10_DX_SRCS-yes += decoder/detokenize.c
-VP10_DX_SRCS-yes += decoder/decodemv.h
-VP10_DX_SRCS-yes += decoder/detokenize.h
-VP10_DX_SRCS-yes += decoder/dthread.c
-VP10_DX_SRCS-yes += decoder/dthread.h
-VP10_DX_SRCS-yes += decoder/decoder.c
-VP10_DX_SRCS-yes += decoder/decoder.h
-VP10_DX_SRCS-yes += decoder/dsubexp.c
-VP10_DX_SRCS-yes += decoder/dsubexp.h
+AV1_DX_SRCS-yes += decoder/decodemv.c
+AV1_DX_SRCS-yes += decoder/decodeframe.c
+AV1_DX_SRCS-yes += decoder/decodeframe.h
+AV1_DX_SRCS-yes += decoder/detokenize.c
+AV1_DX_SRCS-yes += decoder/decodemv.h
+AV1_DX_SRCS-yes += decoder/detokenize.h
+AV1_DX_SRCS-yes += decoder/dthread.c
+AV1_DX_SRCS-yes += decoder/dthread.h
+AV1_DX_SRCS-yes += decoder/decoder.c
+AV1_DX_SRCS-yes += decoder/decoder.h
+AV1_DX_SRCS-yes += decoder/dsubexp.c
+AV1_DX_SRCS-yes += decoder/dsubexp.h
 
-VP10_DX_SRCS-yes := $(filter-out $(VP10_DX_SRCS_REMOVE-yes),$(VP10_DX_SRCS-yes))
+AV1_DX_SRCS-yes := $(filter-out $(AV1_DX_SRCS_REMOVE-yes),$(AV1_DX_SRCS-yes))
diff --git a/av1/av1_dx_iface.c b/av1/av1_dx_iface.c
index f4fee9723a847ecbe0cfc72ebd5a896f0bb27cbc..bccbb888e238794b015ca0349bcb983d6c915f2d 100644
--- a/av1/av1_dx_iface.c
+++ b/av1/av1_dx_iface.c
@@ -30,7 +30,7 @@
 
 #include "av1/av1_iface_common.h"
 
-typedef aom_codec_stream_info_t vp10_stream_info_t;
+typedef aom_codec_stream_info_t av1_stream_info_t;
 
 // This limit is due to framebuffer numbers.
 // TODO(hkuang): Remove this limit after implementing ondemand framebuffers.
@@ -44,7 +44,7 @@ typedef struct cache_frame {
 struct aom_codec_alg_priv {
   aom_codec_priv_t base;
   aom_codec_dec_cfg_t cfg;
-  vp10_stream_info_t si;
+  av1_stream_info_t si;
   int postproc_cfg_set;
   vp8_postproc_cfg_t postproc_cfg;
   aom_decrypt_cb decrypt_cb;
@@ -73,7 +73,7 @@ struct aom_codec_alg_priv {
   // BufferPool that holds all reference frames. Shared by all the FrameWorkers.
   BufferPool *buffer_pool;
 
-  // External frame buffer info to save for VP10 common.
+  // External frame buffer info to save for AV1 common.
   void *ext_priv;  // Private data associated with the external frame buffers.
   aom_get_frame_buffer_cb_fn_t get_ext_fb_cb;
   aom_release_frame_buffer_cb_fn_t release_ext_fb_cb;
@@ -118,8 +118,8 @@ static aom_codec_err_t decoder_destroy(aom_codec_alg_priv_t *ctx) {
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
       aom_get_worker_interface()->end(worker);
-      vp10_remove_common(&frame_worker_data->pbi->common);
-      vp10_decoder_remove(frame_worker_data->pbi);
+      av1_remove_common(&frame_worker_data->pbi->common);
+      av1_decoder_remove(frame_worker_data->pbi);
       aom_free(frame_worker_data->scratch_buffer);
 #if CONFIG_MULTITHREAD
       pthread_mutex_destroy(&frame_worker_data->stats_mutex);
@@ -133,8 +133,8 @@ static aom_codec_err_t decoder_destroy(aom_codec_alg_priv_t *ctx) {
   }
 
   if (ctx->buffer_pool) {
-    vp10_free_ref_frame_buffers(ctx->buffer_pool);
-    vp10_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
+    av1_free_ref_frame_buffers(ctx->buffer_pool);
+    av1_free_internal_frame_buffers(&ctx->buffer_pool->int_frame_buffers);
   }
 
   aom_free(ctx->frame_workers);
@@ -187,7 +187,7 @@ static aom_codec_err_t decoder_peek_si_internal(
     int error_resilient;
     struct aom_read_bit_buffer rb = { data, data + data_sz, 0, NULL, NULL };
     const int frame_marker = aom_rb_read_literal(&rb, 2);
-    const BITSTREAM_PROFILE profile = vp10_read_profile(&rb);
+    const BITSTREAM_PROFILE profile = av1_read_profile(&rb);
 
     if (frame_marker != VPX_FRAME_MARKER) return VPX_CODEC_UNSUP_BITSTREAM;
 
@@ -208,24 +208,24 @@ static aom_codec_err_t decoder_peek_si_internal(
     error_resilient = aom_rb_read_bit(&rb);
 
     if (si->is_kf) {
-      if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+      if (!av1_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
 
       if (!parse_bitdepth_colorspace_sampling(profile, &rb))
         return VPX_CODEC_UNSUP_BITSTREAM;
-      vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+      av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
     } else {
       intra_only_flag = show_frame ? 0 : aom_rb_read_bit(&rb);
 
       rb.bit_offset += error_resilient ? 0 : 2;  // reset_frame_context
 
       if (intra_only_flag) {
-        if (!vp10_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
+        if (!av1_read_sync_code(&rb)) return VPX_CODEC_UNSUP_BITSTREAM;
         if (profile > PROFILE_0) {
           if (!parse_bitdepth_colorspace_sampling(profile, &rb))
             return VPX_CODEC_UNSUP_BITSTREAM;
         }
         rb.bit_offset += REF_FRAMES;  // refresh_frame_flags
-        vp10_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
+        av1_read_frame_size(&rb, (int *)&si->w, (int *)&si->h);
       }
     }
   }
@@ -241,8 +241,8 @@ static aom_codec_err_t decoder_peek_si(const uint8_t *data,
 
 static aom_codec_err_t decoder_get_si(aom_codec_alg_priv_t *ctx,
                                       aom_codec_stream_info_t *si) {
-  const size_t sz = (si->sz >= sizeof(vp10_stream_info_t))
-                        ? sizeof(vp10_stream_info_t)
+  const size_t sz = (si->sz >= sizeof(av1_stream_info_t))
+                        ? sizeof(av1_stream_info_t)
                         : sizeof(aom_codec_stream_info_t);
   memcpy(si, &ctx->si, sz);
   si->sz = (unsigned int)sz;
@@ -269,7 +269,7 @@ static void init_buffer_callbacks(aom_codec_alg_priv_t *ctx) {
   for (i = 0; i < ctx->num_frame_workers; ++i) {
     VPxWorker *const worker = &ctx->frame_workers[i];
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
-    VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+    AV1_COMMON *const cm = &frame_worker_data->pbi->common;
     BufferPool *const pool = cm->buffer_pool;
 
     cm->new_fb_idx = INVALID_IDX;
@@ -281,10 +281,10 @@ static void init_buffer_callbacks(aom_codec_alg_priv_t *ctx) {
       pool->release_fb_cb = ctx->release_ext_fb_cb;
       pool->cb_priv = ctx->ext_priv;
     } else {
-      pool->get_fb_cb = vp10_get_frame_buffer;
-      pool->release_fb_cb = vp10_release_frame_buffer;
+      pool->get_fb_cb = av1_get_frame_buffer;
+      pool->release_fb_cb = av1_release_frame_buffer;
 
-      if (vp10_alloc_internal_frame_buffers(&pool->int_frame_buffers))
+      if (av1_alloc_internal_frame_buffers(&pool->int_frame_buffers))
         aom_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                            "Failed to initialize internal frame buffers");
 
@@ -304,7 +304,7 @@ static int frame_worker_hook(void *arg1, void *arg2) {
   const uint8_t *data = frame_worker_data->data;
   (void)arg2;
 
-  frame_worker_data->result = vp10_receive_compressed_data(
+  frame_worker_data->result = av1_receive_compressed_data(
       frame_worker_data->pbi, frame_worker_data->data_size, &data);
   frame_worker_data->data_end = data;
 
@@ -316,14 +316,14 @@ static int frame_worker_hook(void *arg1, void *arg2) {
       VPxWorker *const worker = frame_worker_data->pbi->frame_worker_owner;
       BufferPool *const pool = frame_worker_data->pbi->common.buffer_pool;
       // Signal all the other threads that are waiting for this frame.
-      vp10_frameworker_lock_stats(worker);
+      av1_frameworker_lock_stats(worker);
       frame_worker_data->frame_context_ready = 1;
       lock_buffer_pool(pool);
       frame_worker_data->pbi->cur_buf->buf.corrupted = 1;
       unlock_buffer_pool(pool);
       frame_worker_data->pbi->need_resync = 1;
-      vp10_frameworker_signal_stats(worker);
-      vp10_frameworker_unlock_stats(worker);
+      av1_frameworker_signal_stats(worker);
+      av1_frameworker_unlock_stats(worker);
       return 0;
     }
   } else if (frame_worker_data->result != 0) {
@@ -380,7 +380,7 @@ static aom_codec_err_t init_decoder(aom_codec_alg_priv_t *ctx) {
       return VPX_CODEC_MEM_ERROR;
     }
     frame_worker_data = (FrameWorkerData *)worker->data1;
-    frame_worker_data->pbi = vp10_decoder_create(ctx->buffer_pool);
+    frame_worker_data->pbi = av1_decoder_create(ctx->buffer_pool);
     if (frame_worker_data->pbi == NULL) {
       set_error_detail(ctx, "Failed to allocate frame_worker_data");
       return VPX_CODEC_MEM_ERROR;
@@ -428,7 +428,7 @@ static aom_codec_err_t init_decoder(aom_codec_alg_priv_t *ctx) {
 }
 
 static INLINE void check_resync(aom_codec_alg_priv_t *const ctx,
-                                const VP10Decoder *const pbi) {
+                                const AV1Decoder *const pbi) {
   // Clear resync flag if worker got a key frame or intra only frame.
   if (ctx->need_resync == 1 && pbi->need_resync == 0 &&
       (pbi->common.intra_only || pbi->common.frame_type == KEY_FRAME))
@@ -482,7 +482,7 @@ static aom_codec_err_t decode_one(aom_codec_alg_priv_t *ctx,
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     // Copy context from last worker thread to next worker thread.
     if (ctx->next_submit_worker_id != ctx->last_submit_worker_id)
-      vp10_frameworker_copy_context(
+      av1_frameworker_copy_context(
           &ctx->frame_workers[ctx->next_submit_worker_id],
           &ctx->frame_workers[ctx->last_submit_worker_id]);
 
@@ -537,8 +537,8 @@ static void wait_worker_and_cache_frame(aom_codec_alg_priv_t *ctx) {
 
   check_resync(ctx, frame_worker_data->pbi);
 
-  if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
-    VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+  if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+    AV1_COMMON *const cm = &frame_worker_data->pbi->common;
     RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
     ctx->frame_cache[ctx->frame_cache_write].fb_idx = cm->new_fb_idx;
     yuvconfig2image(&ctx->frame_cache[ctx->frame_cache_write].img, &sd,
@@ -573,7 +573,7 @@ static aom_codec_err_t decoder_decode(aom_codec_alg_priv_t *ctx,
     if (res != VPX_CODEC_OK) return res;
   }
 
-  res = vp10_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
+  res = av1_parse_superframe_index(data, data_sz, frame_sizes, &frame_count,
                                     ctx->decrypt_cb, ctx->decrypt_state);
   if (res != VPX_CODEC_OK) return res;
 
@@ -721,8 +721,8 @@ static aom_image_t *decoder_get_frame(aom_codec_alg_priv_t *ctx,
           frame_worker_data->received_frame = 0;
           check_resync(ctx, frame_worker_data->pbi);
         }
-        if (vp10_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
-          VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+        if (av1_get_raw_frame(frame_worker_data->pbi, &sd) == 0) {
+          AV1_COMMON *const cm = &frame_worker_data->pbi->common;
           RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
           release_last_output_frame(ctx);
           ctx->last_show_frame = frame_worker_data->pbi->common.new_fb_idx;
@@ -777,7 +777,7 @@ static aom_codec_err_t ctrl_set_reference(aom_codec_alg_priv_t *ctx,
     VPxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     image2yuvconfig(&frame->img, &sd);
-    return vp10_set_reference_dec(&frame_worker_data->pbi->common,
+    return av1_set_reference_dec(&frame_worker_data->pbi->common,
                                   (VPX_REFFRAME)frame->frame_type, &sd);
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -800,7 +800,7 @@ static aom_codec_err_t ctrl_copy_reference(aom_codec_alg_priv_t *ctx,
     VPxWorker *const worker = ctx->frame_workers;
     FrameWorkerData *const frame_worker_data = (FrameWorkerData *)worker->data1;
     image2yuvconfig(&frame->img, &sd);
-    return vp10_copy_reference_dec(frame_worker_data->pbi,
+    return av1_copy_reference_dec(frame_worker_data->pbi,
                                    (VPX_REFFRAME)frame->frame_type, &sd);
   } else {
     return VPX_CODEC_INVALID_PARAM;
@@ -908,7 +908,7 @@ static aom_codec_err_t ctrl_get_frame_size(aom_codec_alg_priv_t *ctx,
       VPxWorker *const worker = ctx->frame_workers;
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+      const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
       frame_size[0] = cm->width;
       frame_size[1] = cm->height;
       return VPX_CODEC_OK;
@@ -935,7 +935,7 @@ static aom_codec_err_t ctrl_get_render_size(aom_codec_alg_priv_t *ctx,
       VPxWorker *const worker = ctx->frame_workers;
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+      const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
       render_size[0] = cm->render_width;
       render_size[1] = cm->render_height;
       return VPX_CODEC_OK;
@@ -956,7 +956,7 @@ static aom_codec_err_t ctrl_get_bit_depth(aom_codec_alg_priv_t *ctx,
     if (worker) {
       FrameWorkerData *const frame_worker_data =
           (FrameWorkerData *)worker->data1;
-      const VP10_COMMON *const cm = &frame_worker_data->pbi->common;
+      const AV1_COMMON *const cm = &frame_worker_data->pbi->common;
       *bit_depth = cm->bit_depth;
       return VPX_CODEC_OK;
     } else {
@@ -1045,8 +1045,8 @@ static aom_codec_ctrl_fn_map_t decoder_ctrl_maps[] = {
 #ifndef VERSION_STRING
 #define VERSION_STRING
 #endif
-CODEC_INTERFACE(aom_codec_vp10_dx) = {
-  "WebM Project VP10 Decoder" VERSION_STRING,
+CODEC_INTERFACE(aom_codec_av1_dx) = {
+  "WebM Project AV1 Decoder" VERSION_STRING,
   VPX_CODEC_INTERNAL_ABI_VERSION,
   VPX_CODEC_CAP_DECODER |
       VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER,  // aom_codec_caps_t
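
A hypothetical caller-side sketch showing the renamed decoder interface being selected. None of this is in the patch; the entry points and header paths (aom_codec_dec_init, aom_codec_decode, aom_codec_get_frame, aom/aom_decoder.h, aom/vp8dx.h) are assumed from the libvpx-style API this tree still mirrors and should be verified against the headers before use.

#include <stddef.h>
#include <stdint.h>

#include "aom/aom_decoder.h" /* assumed header paths */
#include "aom/vp8dx.h"

static int decode_buffer(const uint8_t *buf, size_t len) {
  aom_codec_ctx_t ctx;
  aom_codec_iter_t iter = NULL;
  aom_image_t *img;

  /* Select the renamed interface instead of aom_codec_vp10_dx(). */
  if (aom_codec_dec_init(&ctx, aom_codec_av1_dx(), NULL, 0)) return -1;
  if (aom_codec_decode(&ctx, buf, (unsigned int)len, NULL, 0)) {
    aom_codec_destroy(&ctx);
    return -1;
  }
  while ((img = aom_codec_get_frame(&ctx, &iter)) != NULL) {
    /* consume img->planes[] / img->stride[] here */
  }
  aom_codec_destroy(&ctx);
  return 0;
}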
diff --git a/av1/av1_iface_common.h b/av1/av1_iface_common.h
index 1a91c3b0c795b72058ea04c8004724d21ddbc6cd..b4a01af84e730b228a0b9f85729ef5273839c57e 100644
--- a/av1/av1_iface_common.h
+++ b/av1/av1_iface_common.h
@@ -8,8 +8,8 @@
  * Media Patent License 1.0 was not distributed with this source code in the
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
-#ifndef VP10_VP10_IFACE_COMMON_H_
-#define VP10_VP10_IFACE_COMMON_H_
+#ifndef AV1_AV1_IFACE_COMMON_H_
+#define AV1_AV1_IFACE_COMMON_H_
 
 #include "aom_ports/mem.h"
 
@@ -56,7 +56,7 @@ static void yuvconfig2image(aom_image_t *img, const YV12_BUFFER_CONFIG *yv12,
   img->stride[VPX_PLANE_U] = yv12->uv_stride;
   img->stride[VPX_PLANE_V] = yv12->uv_stride;
   img->stride[VPX_PLANE_ALPHA] = yv12->y_stride;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (yv12->flags & YV12_FLAG_HIGHBITDEPTH) {
     // aom_image_t uses byte strides and a pointer to the first byte
     // of the image.
@@ -71,7 +71,7 @@ static void yuvconfig2image(aom_image_t *img, const YV12_BUFFER_CONFIG *yv12,
     img->stride[VPX_PLANE_V] = 2 * yv12->uv_stride;
     img->stride[VPX_PLANE_ALPHA] = 2 * yv12->y_stride;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   img->bps = bps;
   img->user_priv = user_priv;
   img->img_data = yv12->buffer_alloc;
@@ -104,7 +104,7 @@ static aom_codec_err_t image2yuvconfig(const aom_image_t *img,
   yv12->color_space = img->cs;
   yv12->color_range = img->range;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (img->fmt & VPX_IMG_FMT_HIGHBITDEPTH) {
     // In aom_image_t
     //     planes point to uint8 address of start of data
@@ -128,10 +128,10 @@ static aom_codec_err_t image2yuvconfig(const aom_image_t *img,
   yv12->border = (yv12->y_stride - img->w) / 2;
 #else
   yv12->border = (img->stride[VPX_PLANE_Y] - img->w) / 2;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   yv12->subsampling_x = img->x_chroma_shift;
   yv12->subsampling_y = img->y_chroma_shift;
   return VPX_CODEC_OK;
 }
 
-#endif  // VP10_VP10_IFACE_COMMON_H_
+#endif  // AV1_AV1_IFACE_COMMON_H_
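
A short sketch of the high-bit-depth adjustment that yuvconfig2image() makes above: aom_image_t addresses a plane in bytes, while the codec's YV12 buffers address it in 16-bit samples, so the byte stride is twice the sample stride. The struct and helper below are illustrative stand-ins, not libaom API.

#include <stdint.h>

struct byte_plane { uint8_t *data; int stride_bytes; }; /* illustrative only */

static struct byte_plane to_byte_view(uint16_t *samples, int sample_stride) {
  struct byte_plane p;
  p.data = (uint8_t *)samples;        /* pointer to the first byte of the plane */
  p.stride_bytes = 2 * sample_stride; /* byte stride = 2 x sample stride */
  return p;
}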
diff --git a/av1/common/alloccommon.c b/av1/common/alloccommon.c
index 0cd5e7cf2b33768e1d6b9d51f6d82ff6df83c764..79264df1bf16d015755eda1c7aa4d6fba9624386 100644
--- a/av1/common/alloccommon.c
+++ b/av1/common/alloccommon.c
@@ -18,7 +18,7 @@
 #include "av1/common/entropymv.h"
 #include "av1/common/onyxc_int.h"
 
-void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) {
+void av1_set_mb_mi(AV1_COMMON *cm, int width, int height) {
   const int aligned_width = ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2);
   const int aligned_height = ALIGN_POWER_OF_TWO(height, MI_SIZE_LOG2);
 
@@ -31,7 +31,7 @@ void vp10_set_mb_mi(VP10_COMMON *cm, int width, int height) {
   cm->MBs = cm->mb_rows * cm->mb_cols;
 }
 
-static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
+static int alloc_seg_map(AV1_COMMON *cm, int seg_map_size) {
   int i;
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
@@ -51,7 +51,7 @@ static int alloc_seg_map(VP10_COMMON *cm, int seg_map_size) {
   return 0;
 }
 
-static void free_seg_map(VP10_COMMON *cm) {
+static void free_seg_map(AV1_COMMON *cm) {
   int i;
 
   for (i = 0; i < NUM_PING_PONG_BUFFERS; ++i) {
@@ -66,7 +66,7 @@ static void free_seg_map(VP10_COMMON *cm) {
   }
 }
 
-void vp10_free_ref_frame_buffers(BufferPool *pool) {
+void av1_free_ref_frame_buffers(BufferPool *pool) {
   int i;
 
   for (i = 0; i < FRAME_BUFFERS; ++i) {
@@ -81,7 +81,7 @@ void vp10_free_ref_frame_buffers(BufferPool *pool) {
   }
 }
 
-void vp10_free_context_buffers(VP10_COMMON *cm) {
+void av1_free_context_buffers(AV1_COMMON *cm) {
   cm->free_mi(cm);
   free_seg_map(cm);
   aom_free(cm->above_context);
@@ -90,10 +90,10 @@ void vp10_free_context_buffers(VP10_COMMON *cm) {
   cm->above_seg_context = NULL;
 }
 
-int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
+int av1_alloc_context_buffers(AV1_COMMON *cm, int width, int height) {
   int new_mi_size;
 
-  vp10_set_mb_mi(cm, width, height);
+  av1_set_mb_mi(cm, width, height);
   new_mi_size = cm->mi_stride * calc_mi_size(cm->mi_rows);
   if (cm->mi_alloc_size < new_mi_size) {
     cm->free_mi(cm);
@@ -123,12 +123,12 @@ int vp10_alloc_context_buffers(VP10_COMMON *cm, int width, int height) {
   return 0;
 
 fail:
-  vp10_free_context_buffers(cm);
+  av1_free_context_buffers(cm);
   return 1;
 }
 
-void vp10_remove_common(VP10_COMMON *cm) {
-  vp10_free_context_buffers(cm);
+void av1_remove_common(AV1_COMMON *cm) {
+  av1_free_context_buffers(cm);
 
   aom_free(cm->fc);
   cm->fc = NULL;
@@ -136,13 +136,13 @@ void vp10_remove_common(VP10_COMMON *cm) {
   cm->frame_contexts = NULL;
 }
 
-void vp10_init_context_buffers(VP10_COMMON *cm) {
+void av1_init_context_buffers(AV1_COMMON *cm) {
   cm->setup_mi(cm);
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
     memset(cm->last_frame_seg_map, 0, cm->mi_rows * cm->mi_cols);
 }
 
-void vp10_swap_current_and_last_seg_map(VP10_COMMON *cm) {
+void av1_swap_current_and_last_seg_map(AV1_COMMON *cm) {
   // Swap indices.
   const int tmp = cm->seg_map_idx;
   cm->seg_map_idx = cm->prev_seg_map_idx;
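
The alignment in av1_set_mb_mi() above rounds the frame dimensions up to whole mode-info units before the mi/mb counts are derived. A small worked example, with ALIGN_POWER_OF_TWO reproduced from memory (the real macro lives in aom_dsp) and MI_SIZE_LOG2 assumed to be 3 (8x8 units):

#include <stdio.h>

/* Reproduced for illustration; the real macro lives in aom_dsp. */
#define ALIGN_POWER_OF_TWO(value, n) \
  (((value) + ((1 << (n)) - 1)) & ~((1 << (n)) - 1))

int main(void) {
  const int mi_size_log2 = 3; /* 8x8 mode-info units; value assumed */
  /* 1080 is already a multiple of 8; 1082 rounds up to 1088. */
  printf("%d %d\n", ALIGN_POWER_OF_TWO(1080, mi_size_log2),
         ALIGN_POWER_OF_TWO(1082, mi_size_log2));
  return 0;
}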
diff --git a/av1/common/alloccommon.h b/av1/common/alloccommon.h
index 9370d4b3b260760e65696e0e3d3c864b9a5c492c..bbce0ad59c637909ac1c1088a7f67f12833a4a0f 100644
--- a/av1/common/alloccommon.h
+++ b/av1/common/alloccommon.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_ALLOCCOMMON_H_
-#define VP10_COMMON_ALLOCCOMMON_H_
+#ifndef AV1_COMMON_ALLOCCOMMON_H_
+#define AV1_COMMON_ALLOCCOMMON_H_
 
 #define INVALID_IDX -1  // Invalid buffer index.
 
@@ -18,26 +18,26 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct BufferPool;
 
-void vp10_remove_common(struct VP10Common *cm);
+void av1_remove_common(struct AV1Common *cm);
 
-int vp10_alloc_context_buffers(struct VP10Common *cm, int width, int height);
-void vp10_init_context_buffers(struct VP10Common *cm);
-void vp10_free_context_buffers(struct VP10Common *cm);
+int av1_alloc_context_buffers(struct AV1Common *cm, int width, int height);
+void av1_init_context_buffers(struct AV1Common *cm);
+void av1_free_context_buffers(struct AV1Common *cm);
 
-void vp10_free_ref_frame_buffers(struct BufferPool *pool);
+void av1_free_ref_frame_buffers(struct BufferPool *pool);
 
-int vp10_alloc_state_buffers(struct VP10Common *cm, int width, int height);
-void vp10_free_state_buffers(struct VP10Common *cm);
+int av1_alloc_state_buffers(struct AV1Common *cm, int width, int height);
+void av1_free_state_buffers(struct AV1Common *cm);
 
-void vp10_set_mb_mi(struct VP10Common *cm, int width, int height);
+void av1_set_mb_mi(struct AV1Common *cm, int width, int height);
 
-void vp10_swap_current_and_last_seg_map(struct VP10Common *cm);
+void av1_swap_current_and_last_seg_map(struct AV1Common *cm);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ALLOCCOMMON_H_
+#endif  // AV1_COMMON_ALLOCCOMMON_H_
diff --git a/av1/common/arm/neon/iht4x4_add_neon.c b/av1/common/arm/neon/iht4x4_add_neon.c
index d7f3e9cf8225489ca9d315bb4324b34747f05736..f228f3aee95d5eb44ca5b17bebf78d29b2194fca 100644
--- a/av1/common/arm/neon/iht4x4_add_neon.c
+++ b/av1/common/arm/neon/iht4x4_add_neon.c
@@ -140,7 +140,7 @@ static INLINE void IADST4x4_1D(int16x4_t *d3s16, int16x4_t *d4s16,
   return;
 }
 
-void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
+void av1_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
                              int dest_stride, int tx_type) {
   uint8x8_t d26u8, d27u8;
   int16x4_t d0s16, d1s16, d2s16, d3s16, d4s16, d5s16;
@@ -157,7 +157,7 @@ void vp10_iht4x4_16_add_neon(const tran_low_t *input, uint8_t *dest,
 
   switch (tx_type) {
     case 0:  // idct_idct is not supported. Fall back to C
-      vp10_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
+      av1_iht4x4_16_add_c(input, dest, dest_stride, tx_type);
       return;
       break;
     case 1:  // iadst_idct
diff --git a/av1/common/arm/neon/iht8x8_add_neon.c b/av1/common/arm/neon/iht8x8_add_neon.c
index f90d192186e24c70ec3001e45070bc8ecbb14792..457b8299ddf8844b93241e5a149637d34e58b74b 100644
--- a/av1/common/arm/neon/iht8x8_add_neon.c
+++ b/av1/common/arm/neon/iht8x8_add_neon.c
@@ -472,7 +472,7 @@ static INLINE void IADST8X8_1D(int16x8_t *q8s16, int16x8_t *q9s16,
   return;
 }
 
-void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
+void av1_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
                              int dest_stride, int tx_type) {
   int i;
   uint8_t *d1, *d2;
@@ -495,7 +495,7 @@ void vp10_iht8x8_64_add_neon(const tran_low_t *input, uint8_t *dest,
 
   switch (tx_type) {
     case 0:  // idct_idct is not supported. Fall back to C
-      vp10_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
+      av1_iht8x8_64_add_c(input, dest, dest_stride, tx_type);
       return;
       break;
     case 1:  // iadst_idct
diff --git a/av1/common/av1_fwd_txfm.c b/av1/common/av1_fwd_txfm.c
index ac68eb9db8a715edeec0c7f4736cad0478f2ff48..7ae3b325f60bab7d09bcc407dc840f832022ba9a 100644
--- a/av1/common/av1_fwd_txfm.c
+++ b/av1/common/av1_fwd_txfm.c
@@ -11,7 +11,7 @@
 
 #include "av1/common/av1_fwd_txfm.h"
 
-void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -77,7 +77,7 @@ void vp10_fdct4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   }
 }
 
-void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 4; ++r)
@@ -87,7 +87,7 @@ void vp10_fdct4x4_1_c(const int16_t *input, tran_low_t *output, int stride) {
   output[1] = 0;
 }
 
-void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void av1_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
                     int stride) {
   int i, j;
   tran_low_t intermediate[64];
@@ -173,7 +173,7 @@ void vp10_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
   }
 }
 
-void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 8; ++r)
@@ -183,7 +183,7 @@ void vp10_fdct8x8_1_c(const int16_t *input, tran_low_t *output, int stride) {
   output[1] = 0;
 }
 
-void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
   // The 2D transform is done with two passes which are actually pretty
   // similar. In the first one, we transform the columns and transpose
   // the results. In the second one, we transform the rows. To achieve that,
@@ -363,7 +363,7 @@ void vp10_fdct16x16_c(const int16_t *input, tran_low_t *output, int stride) {
   }
 }
 
-void vp10_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct16x16_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 16; ++r)
@@ -386,7 +386,7 @@ static INLINE tran_high_t half_round_shift(tran_high_t input) {
   return rv;
 }
 
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   tran_high_t step[32];
   // Stage 1
   step[0] = input[0] + input[(32 - 1)];
@@ -709,7 +709,7 @@ void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round) {
   output[31] = dct_32_round(step[31] * cospi_31_64 + step[16] * -cospi_1_64);
 }
 
-void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -717,7 +717,7 @@ void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
@@ -726,7 +726,7 @@ void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
@@ -736,7 +736,7 @@ void vp10_fdct32x32_c(const int16_t *input, tran_low_t *out, int stride) {
 // Note that although we use dct_32_round in dct32 computation flow,
 // this 2d fdct32x32 for rate-distortion optimization loop is operating
 // within 16 bits precision.
-void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
+void av1_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   int i, j;
   tran_high_t output[32 * 32];
 
@@ -744,11 +744,11 @@ void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = input[j * stride + i] * 4;
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       // TODO(cd): see quality impact of only doing
       //           output[j * 32 + i] = (temp_out[j] + 1) >> 2;
-      //           PS: also change code in vp10_dsp/x86/vp10_dct_sse2.c
+      //           PS: also change code in av1_dsp/x86/av1_dct_sse2.c
       output[j * 32 + i] = (temp_out[j] + 1 + (temp_out[j] > 0)) >> 2;
   }
 
@@ -756,12 +756,12 @@ void vp10_fdct32x32_rd_c(const int16_t *input, tran_low_t *out, int stride) {
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = output[j + i * 32];
-    vp10_fdct32(temp_in, temp_out, 1);
+    av1_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
 
-void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   int r, c;
   tran_low_t sum = 0;
   for (r = 0; r < 32; ++r)
@@ -771,44 +771,44 @@ void vp10_fdct32x32_1_c(const int16_t *input, tran_low_t *output, int stride) {
   output[1] = 0;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fdct4x4_c(const int16_t *input, tran_low_t *output,
                            int stride) {
-  vp10_fdct4x4_c(input, output, stride);
+  av1_fdct4x4_c(input, output, stride);
 }
 
-void vp10_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
+void av1_highbd_fdct8x8_c(const int16_t *input, tran_low_t *final_output,
                            int stride) {
-  vp10_fdct8x8_c(input, final_output, stride);
+  av1_fdct8x8_c(input, final_output, stride);
 }
 
-void vp10_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
+void av1_highbd_fdct8x8_1_c(const int16_t *input, tran_low_t *final_output,
                              int stride) {
-  vp10_fdct8x8_1_c(input, final_output, stride);
+  av1_fdct8x8_1_c(input, final_output, stride);
 }
 
-void vp10_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_c(const int16_t *input, tran_low_t *output,
                              int stride) {
-  vp10_fdct16x16_c(input, output, stride);
+  av1_fdct16x16_c(input, output, stride);
 }
 
-void vp10_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fdct16x16_1_c(const int16_t *input, tran_low_t *output,
                                int stride) {
-  vp10_fdct16x16_1_c(input, output, stride);
+  av1_fdct16x16_1_c(input, output, stride);
 }
 
-void vp10_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
+void av1_highbd_fdct32x32_c(const int16_t *input, tran_low_t *out,
                              int stride) {
-  vp10_fdct32x32_c(input, out, stride);
+  av1_fdct32x32_c(input, out, stride);
 }
 
-void vp10_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
+void av1_highbd_fdct32x32_rd_c(const int16_t *input, tran_low_t *out,
                                 int stride) {
-  vp10_fdct32x32_rd_c(input, out, stride);
+  av1_fdct32x32_rd_c(input, out, stride);
 }
 
-void vp10_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
+void av1_highbd_fdct32x32_1_c(const int16_t *input, tran_low_t *out,
                                int stride) {
-  vp10_fdct32x32_1_c(input, out, stride);
+  av1_fdct32x32_1_c(input, out, stride);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
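
A small illustration (not part of the patch) of the inter-pass rounding used in av1_fdct32x32_c() above: (x + 1 + (x > 0)) >> 2 divides by 4 and rounds exact halves away from zero, relying on an arithmetic right shift for negative values.

#include <stdio.h>

static int round_div4(int x) { return (x + 1 + (x > 0)) >> 2; }

int main(void) {
  const int samples[] = { 5, 6, -5, -6 }; /* 1.25, 1.5, -1.25, -1.5 */
  int i;
  for (i = 0; i < 4; ++i)
    printf("%3d -> %2d\n", samples[i], round_div4(samples[i]));
  return 0;
}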
diff --git a/av1/common/av1_fwd_txfm.h b/av1/common/av1_fwd_txfm.h
index a8b17a384023db607e4fd11e239fadfde866442b..db763e5de0675ee93b68a2773bcc9f98a677438a 100644
--- a/av1/common/av1_fwd_txfm.h
+++ b/av1/common/av1_fwd_txfm.h
@@ -9,11 +9,11 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_VP10_FWD_TXFM_H_
-#define VP10_COMMON_VP10_FWD_TXFM_H_
+#ifndef AV1_COMMON_AV1_FWD_TXFM_H_
+#define AV1_COMMON_AV1_FWD_TXFM_H_
 
 #include "aom_dsp/txfm_common.h"
 #include "aom_dsp/fwd_txfm.h"
 
-void vp10_fdct32(const tran_high_t *input, tran_high_t *output, int round);
-#endif  // VP10_COMMON_VP10_FWD_TXFM_H_
+void av1_fdct32(const tran_high_t *input, tran_high_t *output, int round);
+#endif  // AV1_COMMON_AV1_FWD_TXFM_H_
diff --git a/av1/common/av1_inv_txfm.c b/av1/common/av1_inv_txfm.c
index 072593a3ebd9c38d35978bb446b58d230cc86130..0a7f6b90d4be240c75d276344ead0c31bd827059 100644
--- a/av1/common/av1_inv_txfm.c
+++ b/av1/common/av1_inv_txfm.c
@@ -13,7 +13,7 @@
 
 #include "av1/common/av1_inv_txfm.h"
 
-void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
   int i;
@@ -65,7 +65,7 @@ void vp10_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
+void av1_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
                           int dest_stride) {
   int i;
   tran_high_t a1, e1;
@@ -92,7 +92,7 @@ void vp10_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest,
   }
 }
 
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   // stage 1
@@ -112,7 +112,7 @@ void vp10_idct4_c(const tran_low_t *input, tran_low_t *output) {
   output[3] = WRAPLOW(step[0] - step[3], 8);
 }
 
-void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
   int i, j;
@@ -120,7 +120,7 @@ void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vp10_idct4_c(input, outptr);
+    av1_idct4_c(input, outptr);
     input += 4;
     outptr += 4;
   }
@@ -128,7 +128,7 @@ void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vp10_idct4_c(temp_in, temp_out);
+    av1_idct4_c(temp_in, temp_out);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 4));
@@ -136,7 +136,7 @@ void vp10_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
                           int dest_stride) {
   int i;
   tran_high_t a1;
@@ -153,7 +153,7 @@ void vp10_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -207,7 +207,7 @@ void vp10_idct8_c(const tran_low_t *input, tran_low_t *output) {
   output[7] = WRAPLOW(step1[0] - step1[7], 8);
 }
 
-void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
   int i, j;
@@ -215,7 +215,7 @@ void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
 
   // First transform rows
   for (i = 0; i < 8; ++i) {
-    vp10_idct8_c(input, outptr);
+    av1_idct8_c(input, outptr);
     input += 8;
     outptr += 8;
   }
@@ -223,7 +223,7 @@ void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   // Then transform columns
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_idct8_c(temp_in, temp_out);
+    av1_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -231,7 +231,7 @@ void vp10_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   int i, j;
   tran_high_t a1;
   tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8);
@@ -243,7 +243,7 @@ void vp10_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -280,7 +280,7 @@ void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output) {
   output[3] = WRAPLOW(dct_const_round_shift(s0 + s1 - s3), 8);
 }
 
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output) {
   int s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_high_t x0 = input[7];
@@ -357,7 +357,7 @@ void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output) {
   output[7] = WRAPLOW(-x1, 8);
 }
 
-void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
   int i, j;
@@ -366,7 +366,7 @@ void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   // First transform rows
   // only first 4 row has non-zero coefs
   for (i = 0; i < 4; ++i) {
-    vp10_idct8_c(input, outptr);
+    av1_idct8_c(input, outptr);
     input += 8;
     outptr += 8;
   }
@@ -374,7 +374,7 @@ void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   // Then transform columns
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_idct8_c(temp_in, temp_out);
+    av1_idct8_c(temp_in, temp_out);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 5));
@@ -382,7 +382,7 @@ void vp10_idct8x8_12_add_c(const tran_low_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
 
@@ -547,7 +547,7 @@ void vp10_idct16_c(const tran_low_t *input, tran_low_t *output) {
   output[15] = WRAPLOW(step2[0] - step2[15], 8);
 }
 
-void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
                               int stride) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -556,7 +556,7 @@ void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
 
   // First transform rows
   for (i = 0; i < 16; ++i) {
-    vp10_idct16_c(input, outptr);
+    av1_idct16_c(input, outptr);
     input += 16;
     outptr += 16;
   }
@@ -564,7 +564,7 @@ void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
   // Then transform columns
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_idct16_c(temp_in, temp_out);
+    av1_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -572,7 +572,7 @@ void vp10_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
 
@@ -743,7 +743,7 @@ void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output) {
   output[15] = WRAPLOW(-x1, 8);
 }
 
-void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
@@ -753,7 +753,7 @@ void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vp10_idct16_c(input, outptr);
+    av1_idct16_c(input, outptr);
     input += 16;
     outptr += 16;
   }
@@ -761,7 +761,7 @@ void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
   // Then transform columns
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_idct16_c(temp_in, temp_out);
+    av1_idct16_c(temp_in, temp_out);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -769,7 +769,7 @@ void vp10_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   int i, j;
   tran_high_t a1;
@@ -782,7 +782,7 @@ void vp10_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output) {
   tran_low_t step1[32], step2[32];
   tran_high_t temp1, temp2;
 
@@ -1149,7 +1149,7 @@ void vp10_idct32_c(const tran_low_t *input, tran_low_t *output) {
   output[31] = WRAPLOW(step1[0] - step1[31], 8);
 }
 
-void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
                                int stride) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
@@ -1168,7 +1168,7 @@ void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
       zero_coeff[j] = zero_coeff[2 * j] | zero_coeff[2 * j + 1];
 
     if (zero_coeff[0] | zero_coeff[1])
-      vp10_idct32_c(input, outptr);
+      av1_idct32_c(input, outptr);
     else
       memset(outptr, 0, sizeof(tran_low_t) * 32);
     input += 32;
@@ -1178,7 +1178,7 @@ void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vp10_idct32_c(temp_in, temp_out);
+    av1_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1186,7 +1186,7 @@ void vp10_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
                              int stride) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
@@ -1196,7 +1196,7 @@ void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
   // Rows
   // only upper-left 8x8 has non-zero coeff
   for (i = 0; i < 8; ++i) {
-    vp10_idct32_c(input, outptr);
+    av1_idct32_c(input, outptr);
     input += 32;
     outptr += 32;
   }
@@ -1204,7 +1204,7 @@ void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
   // Columns
   for (i = 0; i < 32; ++i) {
     for (j = 0; j < 32; ++j) temp_in[j] = out[j * 32 + i];
-    vp10_idct32_c(temp_in, temp_out);
+    av1_idct32_c(temp_in, temp_out);
     for (j = 0; j < 32; ++j) {
       dest[j * stride + i] = clip_pixel_add(dest[j * stride + i],
                                             ROUND_POWER_OF_TWO(temp_out[j], 6));
@@ -1212,7 +1212,7 @@ void vp10_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
+void av1_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
                             int stride) {
   int i, j;
   tran_high_t a1;
@@ -1227,8 +1227,8 @@ void vp10_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   /* 4-point reversible, orthonormal inverse Walsh-Hadamard in 3.5 adds,
      0.5 shifts per pixel. */
@@ -1282,7 +1282,7 @@ void vp10_highbd_iwht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
+void av1_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
                                  int dest_stride, int bd) {
   int i;
   tran_high_t a1, e1;
@@ -1315,7 +1315,7 @@ void vp10_highbd_iwht4x4_1_add_c(const tran_low_t *in, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step[4];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1336,7 +1336,7 @@ void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[3] = WRAPLOW(step[0] - step[3], bd);
 }
 
-void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
@@ -1346,7 +1346,7 @@ void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // Rows
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct4_c(input, outptr, bd);
+    av1_highbd_idct4_c(input, outptr, bd);
     input += 4;
     outptr += 4;
   }
@@ -1354,7 +1354,7 @@ void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   // Columns
   for (i = 0; i < 4; ++i) {
     for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-    vp10_highbd_idct4_c(temp_in, temp_out, bd);
+    av1_highbd_idct4_c(temp_in, temp_out, bd);
     for (j = 0; j < 4; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -1362,7 +1362,7 @@ void vp10_highbd_idct4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int dest_stride, int bd) {
   int i;
   tran_high_t a1;
@@ -1382,7 +1382,7 @@ void vp10_highbd_idct4x4_1_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[8], step2[8];
   tran_high_t temp1, temp2;
   // stage 1
@@ -1400,7 +1400,7 @@ void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   step1[6] = WRAPLOW(highbd_dct_const_round_shift(temp2, bd), bd);
 
   // stage 2 & stage 3 - even half
-  vp10_highbd_idct4_c(step1, step1, bd);
+  av1_highbd_idct4_c(step1, step1, bd);
 
   // stage 2 - odd half
   step2[4] = WRAPLOW(step1[4] + step1[5], bd);
@@ -1427,7 +1427,7 @@ void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[7] = WRAPLOW(step1[0] - step1[7], bd);
 }
 
-void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
@@ -1437,7 +1437,7 @@ void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // First transform rows.
   for (i = 0; i < 8; ++i) {
-    vp10_highbd_idct8_c(input, outptr, bd);
+    av1_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
@@ -1445,7 +1445,7 @@ void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_highbd_idct8_c(temp_in, temp_out, bd);
+    av1_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1453,7 +1453,7 @@ void vp10_highbd_idct8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int bd) {
   int i, j;
   tran_high_t a1;
@@ -1468,7 +1468,7 @@ void vp10_highbd_idct8x8_1_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[0];
@@ -1506,7 +1506,7 @@ void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[3] = WRAPLOW(highbd_dct_const_round_shift(s0 + s1 - s3, bd), bd);
 }
 
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7;
 
   tran_low_t x0 = input[7];
@@ -1583,7 +1583,7 @@ void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[7] = WRAPLOW(-x1, bd);
 }
 
-void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
                                   int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
@@ -1594,14 +1594,14 @@ void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
   // First transform rows.
   // Only the first 4 rows have non-zero coefs.
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct8_c(input, outptr, bd);
+    av1_highbd_idct8_c(input, outptr, bd);
     input += 8;
     outptr += 8;
   }
   // Then transform columns.
   for (i = 0; i < 8; ++i) {
     for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-    vp10_highbd_idct8_c(temp_in, temp_out, bd);
+    av1_highbd_idct8_c(temp_in, temp_out, bd);
     for (j = 0; j < 8; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -1609,7 +1609,7 @@ void vp10_highbd_idct8x8_10_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   tran_low_t step1[16], step2[16];
   tran_high_t temp1, temp2;
   (void)bd;
@@ -1775,7 +1775,7 @@ void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd) {
   output[15] = WRAPLOW(step2[0] - step2[15], bd);
 }
 
-void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
                                      int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -1785,7 +1785,7 @@ void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
 
   // First transform rows.
   for (i = 0; i < 16; ++i) {
-    vp10_highbd_idct16_c(input, outptr, bd);
+    av1_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1793,7 +1793,7 @@ void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_highbd_idct16_c(temp_in, temp_out, bd);
+    av1_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1801,7 +1801,7 @@ void vp10_highbd_idct16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
                            int bd) {
   tran_high_t s0, s1, s2, s3, s4, s5, s6, s7, s8;
   tran_high_t s9, s10, s11, s12, s13, s14, s15;
@@ -1972,7 +1972,7 @@ void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output,
   output[15] = WRAPLOW(-x1, bd);
 }
 
-void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
@@ -1983,7 +1983,7 @@ void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
   // First transform rows. Since all non-zero dct coefficients are in
   // upper-left 4x4 area, we only need to calculate first 4 rows here.
   for (i = 0; i < 4; ++i) {
-    vp10_highbd_idct16_c(input, outptr, bd);
+    av1_highbd_idct16_c(input, outptr, bd);
     input += 16;
     outptr += 16;
   }
@@ -1991,7 +1991,7 @@ void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
   // Then transform columns.
   for (i = 0; i < 16; ++i) {
     for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-    vp10_highbd_idct16_c(temp_in, temp_out, bd);
+    av1_highbd_idct16_c(temp_in, temp_out, bd);
     for (j = 0; j < 16; ++j) {
       dest[j * stride + i] = highbd_clip_pixel_add(
           dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -1999,7 +1999,7 @@ void vp10_highbd_idct16x16_10_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int bd) {
   int i, j;
   tran_high_t a1;
@@ -2384,7 +2384,7 @@ static void highbd_idct32_c(const tran_low_t *input, tran_low_t *output,
   output[31] = WRAPLOW(step1[0] - step1[31], bd);
 }
 
-void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
                                       int stride, int bd) {
   tran_low_t out[32 * 32];
   tran_low_t *outptr = out;
@@ -2422,7 +2422,7 @@ void vp10_highbd_idct32x32_1024_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int bd) {
   tran_low_t out[32 * 32] = { 0 };
   tran_low_t *outptr = out;
@@ -2448,7 +2448,7 @@ void vp10_highbd_idct32x32_34_add_c(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
                                    int stride, int bd) {
   int i, j;
   int a1;
@@ -2464,4 +2464,4 @@ void vp10_highbd_idct32x32_1_add_c(const tran_low_t *input, uint8_t *dest8,
     dest += stride;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
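
All of the renamed *_add_c kernels in the file above share the same separable structure: a 1-D inverse transform is applied to every row of coefficients into a temporary block, then to every column, and each column result is rounded, clipped and added to the destination. A minimal standalone sketch of that pattern follows; the names (idct_rows_cols_add_sketch, clip_pixel_add_sketch, the idct1d callback) are illustrative only and not part of this patch.

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

static uint8_t clip_pixel_add_sketch(uint8_t dest, int32_t trans) {
  const int32_t v = dest + trans;
  return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

/* Row/column passes for a 4x4 block; the real kernels plug in their own 1-D
 * transforms (e.g. av1_idct4_c) and block sizes. */
static void idct_rows_cols_add_sketch(const int32_t *input, uint8_t *dest,
                                      int stride,
                                      void (*idct1d)(const int32_t *,
                                                     int32_t *)) {
  int32_t out[4 * 4], temp_in[4], temp_out[4];
  int i, j;
  /* First pass: transform each row of coefficients into the temporary block. */
  for (i = 0; i < 4; ++i) idct1d(input + 4 * i, out + 4 * i);
  /* Second pass: transform each column, shift down by 4 with rounding
   * (the 16x16/32x32 kernels above use 6), clip and add to the destination. */
  for (i = 0; i < 4; ++i) {
    for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
    idct1d(temp_in, temp_out);
    for (j = 0; j < 4; ++j)
      dest[j * stride + i] = clip_pixel_add_sketch(
          dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4));
  }
}
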
diff --git a/av1/common/av1_inv_txfm.h b/av1/common/av1_inv_txfm.h
index 032d7f18e92cfe053805695a89197ef903640b4b..eb5254ed49e0918c00d412cd1f4df80aa3d17cac 100644
--- a/av1/common/av1_inv_txfm.h
+++ b/av1/common/av1_inv_txfm.h
@@ -41,7 +41,7 @@ static INLINE tran_low_t dct_const_round_shift(tran_high_t input) {
   return check_range(rv);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE tran_low_t highbd_check_range(tran_high_t input, int bd) {
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
   // For valid highbitdepth streams, intermediate stage coefficients will
@@ -64,7 +64,7 @@ static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input,
   tran_high_t rv = ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
   return highbd_check_range(rv, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if CONFIG_EMULATE_HARDWARE
 // When CONFIG_EMULATE_HARDWARE is 1 the transform performs a
@@ -88,22 +88,22 @@ static INLINE tran_low_t highbd_dct_const_round_shift(tran_high_t input,
 #define WRAPLOW(x, bd) ((int32_t)(x))
 #endif  // CONFIG_EMULATE_HARDWARE
 
-void vp10_idct4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct16_c(const tran_low_t *input, tran_low_t *output);
-void vp10_idct32_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst4_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst8_c(const tran_low_t *input, tran_low_t *output);
-void vp10_iadst16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct4_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct8_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct16_c(const tran_low_t *input, tran_low_t *output);
+void av1_idct32_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst4_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst8_c(const tran_low_t *input, tran_low_t *output);
+void av1_iadst16_c(const tran_low_t *input, tran_low_t *output);
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_idct4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_idct16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
-void vp10_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
-void vp10_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst4_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst8_c(const tran_low_t *input, tran_low_t *output, int bd);
+void av1_highbd_iadst16_c(const tran_low_t *input, tran_low_t *output, int bd);
 
 static INLINE uint16_t highbd_clip_pixel_add(uint16_t dest, tran_high_t trans,
                                              int bd) {
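
Unlike the 8-bit kernels, the high-bitdepth prototypes above take an explicit bd argument, and their add/clip step clamps against 2^bd - 1 instead of 255. A rough sketch of that clamp, assuming the usual legal bit depths of 8, 10 and 12; the _sketch name is illustrative and not part of the patch.

#include <stdint.h>

/* Add a reconstructed residual to a high-bitdepth pixel and clamp the result
 * to the legal pixel range [0, 2^bd - 1] for the given bit depth. */
static uint16_t highbd_clip_pixel_add_sketch(uint16_t dest, int32_t trans,
                                             int bd) {
  const int32_t pixel_max = (1 << bd) - 1;
  const int32_t v = (int32_t)dest + trans;
  return (uint16_t)(v < 0 ? 0 : v > pixel_max ? pixel_max : v);
}
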
diff --git a/av1/common/av1_rtcd_defs.pl b/av1/common/av1_rtcd_defs.pl
index 094da4e8b47a2f505c0ddf323c03ca26b516d150..afa68201c9fb5036a302fd9db9a062f7ea462886 100644
--- a/av1/common/av1_rtcd_defs.pl
+++ b/av1/common/av1_rtcd_defs.pl
@@ -1,7 +1,7 @@
-sub vp10_common_forward_decls() {
+sub av1_common_forward_decls() {
 print <<EOF
 /*
- * VP10
+ * AV1
  */
 
 #include "aom/aom_integer.h"
@@ -19,7 +19,7 @@ union int_mv;
 struct yv12_buffer_config;
 EOF
 }
-forward_decls qw/vp10_common_forward_decls/;
+forward_decls qw/av1_common_forward_decls/;
 
 # x86inc.asm had specific constraints. break it out so it's easy to disable.
 # zero all the variables to avoid tricky else conditions.
@@ -57,314 +57,314 @@ if ($opts{arch} eq "x86_64") {
 #
 # dct
 #
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   # Note as optimized versions of these functions are added we need to add a check to ensure
   # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
   if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add/;
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add/;
 
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add/;
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add/;
 
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
 
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4/;
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4/;
 
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1/;
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1/;
 
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8/;
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8/;
 
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1/;
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1/;
 
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16/;
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16/;
 
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1/;
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1/;
 
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32/;
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32/;
 
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd/;
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd/;
 
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1/;
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1/;
 
-    add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct4x4/;
+    add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct4x4/;
 
-    add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8/;
+    add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8/;
 
-    add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8_1/;
+    add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8_1/;
 
-    add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16/;
+    add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16/;
 
-    add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16_1/;
+    add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16_1/;
 
-    add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32/;
+    add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32/;
 
-    add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_rd/;
+    add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_rd/;
 
-    add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_1/;
+    add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_1/;
   } else {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add sse2/;
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add sse2/;
 
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add sse2/;
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add sse2/;
 
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
 
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4 sse2/;
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4 sse2/;
 
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1 sse2/;
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1 sse2/;
 
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8 sse2/;
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8 sse2/;
 
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1 sse2/;
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1 sse2/;
 
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16 sse2/;
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16 sse2/;
 
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1 sse2/;
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1 sse2/;
 
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32 sse2/;
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32 sse2/;
 
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd sse2/;
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd sse2/;
 
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1 sse2/;
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1 sse2/;
 
-    add_proto qw/void vp10_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct4x4 sse2/;
+    add_proto qw/void av1_highbd_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct4x4 sse2/;
 
-    add_proto qw/void vp10_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8 sse2/;
+    add_proto qw/void av1_highbd_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8 sse2/;
 
-    add_proto qw/void vp10_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct8x8_1/;
+    add_proto qw/void av1_highbd_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct8x8_1/;
 
-    add_proto qw/void vp10_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16 sse2/;
+    add_proto qw/void av1_highbd_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16 sse2/;
 
-    add_proto qw/void vp10_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct16x16_1/;
+    add_proto qw/void av1_highbd_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct16x16_1/;
 
-    add_proto qw/void vp10_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32 sse2/;
+    add_proto qw/void av1_highbd_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32 sse2/;
 
-    add_proto qw/void vp10_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_rd sse2/;
+    add_proto qw/void av1_highbd_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_rd sse2/;
 
-    add_proto qw/void vp10_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_highbd_fdct32x32_1/;
+    add_proto qw/void av1_highbd_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_highbd_fdct32x32_1/;
   }
 } else {
   # Force C versions if CONFIG_EMULATE_HARDWARE is 1
   if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add/;
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add/;
 
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add/;
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add/;
 
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add/;
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add/;
 
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4/;
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4/;
 
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1/;
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1/;
 
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8/;
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8/;
 
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1/;
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1/;
 
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16/;
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16/;
 
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1/;
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1/;
 
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32/;
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32/;
 
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd/;
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd/;
 
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1/;
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1/;
   } else {
-    add_proto qw/void vp10_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht4x4_16_add sse2 neon dspr2 msa/;
+    add_proto qw/void av1_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht4x4_16_add sse2 neon dspr2 msa/;
 
-    add_proto qw/void vp10_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
-    specialize qw/vp10_iht8x8_64_add sse2 neon dspr2 msa/;
+    add_proto qw/void av1_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type";
+    specialize qw/av1_iht8x8_64_add sse2 neon dspr2 msa/;
 
-    add_proto qw/void vp10_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
-    specialize qw/vp10_iht16x16_256_add sse2 dspr2 msa/;
+    add_proto qw/void av1_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type";
+    specialize qw/av1_iht16x16_256_add sse2 dspr2 msa/;
 
-    add_proto qw/void vp10_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4 sse2/;
+    add_proto qw/void av1_fdct4x4/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4 sse2/;
 
-    add_proto qw/void vp10_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct4x4_1 sse2/;
+    add_proto qw/void av1_fdct4x4_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct4x4_1 sse2/;
 
-    add_proto qw/void vp10_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8 sse2/;
+    add_proto qw/void av1_fdct8x8/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8 sse2/;
 
-    add_proto qw/void vp10_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct8x8_1 sse2/;
+    add_proto qw/void av1_fdct8x8_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct8x8_1 sse2/;
 
-    add_proto qw/void vp10_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16 sse2/;
+    add_proto qw/void av1_fdct16x16/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16 sse2/;
 
-    add_proto qw/void vp10_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct16x16_1 sse2/;
+    add_proto qw/void av1_fdct16x16_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct16x16_1 sse2/;
 
-    add_proto qw/void vp10_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32 sse2/;
+    add_proto qw/void av1_fdct32x32/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32 sse2/;
 
-    add_proto qw/void vp10_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_rd sse2/;
+    add_proto qw/void av1_fdct32x32_rd/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_rd sse2/;
 
-    add_proto qw/void vp10_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
-    specialize qw/vp10_fdct32x32_1 sse2/;
+    add_proto qw/void av1_fdct32x32_1/, "const int16_t *input, tran_low_t *output, int stride";
+    specialize qw/av1_fdct32x32_1 sse2/;
   }
 }
 
 # High bitdepth functions
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   #
   # Sub Pixel Filters
   #
-  add_proto qw/void vp10_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve_copy/;
+  add_proto qw/void av1_highbd_convolve_copy/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve_copy/;
 
-  add_proto qw/void vp10_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve_avg/;
+  add_proto qw/void av1_highbd_convolve_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve_avg/;
 
-  add_proto qw/void vp10_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8/, "$sse2_x86_64";
+  add_proto qw/void av1_highbd_convolve8/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8/, "$sse2_x86_64";
 
-  add_proto qw/void vp10_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_horiz/, "$sse2_x86_64";
+  add_proto qw/void av1_highbd_convolve8_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_horiz/, "$sse2_x86_64";
 
-  add_proto qw/void vp10_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_vert/, "$sse2_x86_64";
+  add_proto qw/void av1_highbd_convolve8_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_vert/, "$sse2_x86_64";
 
-  add_proto qw/void vp10_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg/, "$sse2_x86_64";
+  add_proto qw/void av1_highbd_convolve8_avg/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg/, "$sse2_x86_64";
 
-  add_proto qw/void vp10_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
+  add_proto qw/void av1_highbd_convolve8_avg_horiz/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg_horiz/, "$sse2_x86_64";
 
-  add_proto qw/void vp10_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
-  specialize qw/vp10_highbd_convolve8_avg_vert/, "$sse2_x86_64";
+  add_proto qw/void av1_highbd_convolve8_avg_vert/, "const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, ptrdiff_t dst_stride, const int16_t *filter_x, int x_step_q4, const int16_t *filter_y, int y_step_q4, int w, int h, int bps";
+  specialize qw/av1_highbd_convolve8_avg_vert/, "$sse2_x86_64";
 
   #
   # dct
   #
   # Note as optimized versions of these functions are added we need to add a check to ensure
   # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp10_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht4x4_16_add/;
+  add_proto qw/void av1_highbd_iht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht4x4_16_add/;
 
-  add_proto qw/void vp10_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht8x8_64_add/;
+  add_proto qw/void av1_highbd_iht8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int tx_type, int bd";
+  specialize qw/av1_highbd_iht8x8_64_add/;
 
-  add_proto qw/void vp10_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
-  specialize qw/vp10_highbd_iht16x16_256_add/;
+  add_proto qw/void av1_highbd_iht16x16_256_add/, "const tran_low_t *input, uint8_t *output, int pitch, int tx_type, int bd";
+  specialize qw/av1_highbd_iht16x16_256_add/;
 }
 
 #
 # Encoder functions below this point.
 #
-if (aom_config("CONFIG_VP10_ENCODER") eq "yes") {
+if (aom_config("CONFIG_AV1_ENCODER") eq "yes") {
 
 # ENCODEMB INVOKE
 
 if (aom_config("CONFIG_AOM_QM") eq "yes") {
-  if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
     # the transform coefficients are held in 32-bit
-    # values, so the assembler code for  vp10_block_error can no longer be used.
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error/;
+    # values, so the assembler code for  av1_block_error can no longer be used.
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error/;
 
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
 
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
 
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
-    specialize qw/vp10_fdct8x8_quant/;
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    specialize qw/av1_fdct8x8_quant/;
   } else {
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
 
-    add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
-    specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
+    add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+    specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
 
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
 
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
 
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t *iqm_ptr";
   }
 } else {
-  if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+  if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
     # the transform coefficients are held in 32-bit
-    # values, so the assembler code for  vp10_block_error can no longer be used.
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error/;
+    # values, so the assembler code for  av1_block_error can no longer be used.
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error/;
 
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp/;
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp/;
 
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp_32x32/;
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp_32x32/;
 
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_fdct8x8_quant/;
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_fdct8x8_quant/;
   } else {
-    add_proto qw/int64_t vp10_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
-    specialize qw/vp10_block_error avx2 msa/, "$sse2_x86inc";
+    add_proto qw/int64_t av1_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz";
+    specialize qw/av1_block_error avx2 msa/, "$sse2_x86inc";
 
-    add_proto qw/int64_t vp10_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
-    specialize qw/vp10_block_error_fp neon/, "$sse2_x86inc";
+    add_proto qw/int64_t av1_block_error_fp/, "const int16_t *coeff, const int16_t *dqcoeff, int block_size";
+    specialize qw/av1_block_error_fp neon/, "$sse2_x86inc";
 
-    add_proto qw/void vp10_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp neon sse2/, "$ssse3_x86_64_x86inc";
+    add_proto qw/void av1_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp neon sse2/, "$ssse3_x86_64_x86inc";
 
-    add_proto qw/void vp10_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_quantize_fp_32x32/, "$ssse3_x86_64_x86inc";
+    add_proto qw/void av1_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_quantize_fp_32x32/, "$ssse3_x86_64_x86inc";
 
-    add_proto qw/void vp10_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_fdct8x8_quant sse2 ssse3 neon/;
+    add_proto qw/void av1_fdct8x8_quant/, "const int16_t *input, int stride, tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_fdct8x8_quant sse2 ssse3 neon/;
   }
 
 }
@@ -372,269 +372,269 @@ if (aom_config("CONFIG_AOM_QM") eq "yes") {
 
 # fdct functions
 
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
-  add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x4 sse2/;
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
+  add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x4 sse2/;
 
-  add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x8 sse2/;
+  add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x8 sse2/;
 
-  add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x16 sse2/;
+  add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x16 sse2/;
 
-  add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_fwht4x4/, "$mmx_x86inc";
+  add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_fwht4x4/, "$mmx_x86inc";
 } else {
-  add_proto qw/void vp10_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht4x4 sse2 msa/;
+  add_proto qw/void av1_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht4x4 sse2 msa/;
 
-  add_proto qw/void vp10_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht8x8 sse2 msa/;
+  add_proto qw/void av1_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht8x8 sse2 msa/;
 
-  add_proto qw/void vp10_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_fht16x16 sse2 msa/;
+  add_proto qw/void av1_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_fht16x16 sse2 msa/;
 
-  add_proto qw/void vp10_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_fwht4x4 msa/, "$mmx_x86inc";
+  add_proto qw/void av1_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_fwht4x4 msa/, "$mmx_x86inc";
 }
 
 # Inverse transform
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
   # Note: as optimized versions of these functions are added, we need to add a check to ensure
   # that when CONFIG_EMULATE_HARDWARE is on, it defaults to the C versions only.
-  add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct4x4_1_add/;
+  add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct4x4_1_add/;
 
-  add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct4x4_16_add/;
+  add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct4x4_16_add/;
 
-  add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_1_add/;
+  add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_1_add/;
 
-  add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_64_add/;
+  add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_64_add/;
 
-  add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct8x8_12_add/;
+  add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct8x8_12_add/;
 
-  add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_1_add/;
+  add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_1_add/;
 
-  add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_256_add/;
+  add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_256_add/;
 
-  add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct16x16_10_add/;
+  add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct16x16_10_add/;
 
-  add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_1024_add/;
+  add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_1024_add/;
 
-  add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_34_add/;
+  add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_34_add/;
 
-  add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_idct32x32_1_add/;
+  add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_idct32x32_1_add/;
 
-  add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_iwht4x4_1_add/;
+  add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_iwht4x4_1_add/;
 
-  add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-  specialize qw/vp10_iwht4x4_16_add/;
+  add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+  specialize qw/av1_iwht4x4_16_add/;
 
-  add_proto qw/void vp10_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct4x4_1_add/;
+  add_proto qw/void av1_highbd_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct4x4_1_add/;
 
-  add_proto qw/void vp10_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct8x8_1_add/;
+  add_proto qw/void av1_highbd_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct8x8_1_add/;
 
-  add_proto qw/void vp10_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct16x16_1_add/;
+  add_proto qw/void av1_highbd_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct16x16_1_add/;
 
-  add_proto qw/void vp10_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_1024_add/;
+  add_proto qw/void av1_highbd_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_1024_add/;
 
-  add_proto qw/void vp10_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_34_add/;
+  add_proto qw/void av1_highbd_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_34_add/;
 
-  add_proto qw/void vp10_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_idct32x32_1_add/;
+  add_proto qw/void av1_highbd_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_idct32x32_1_add/;
 
-  add_proto qw/void vp10_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_iwht4x4_1_add/;
+  add_proto qw/void av1_highbd_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_iwht4x4_1_add/;
 
-  add_proto qw/void vp10_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-  specialize qw/vp10_highbd_iwht4x4_16_add/;
+  add_proto qw/void av1_highbd_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+  specialize qw/av1_highbd_iwht4x4_16_add/;
 
   # Force C versions if CONFIG_EMULATE_HARDWARE is 1
   if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct4x4_16_add/;
+    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct4x4_16_add/;
 
-    add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_64_add/;
+    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_64_add/;
 
-    add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_10_add/;
+    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_10_add/;
 
-    add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_256_add/;
+    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_256_add/;
 
-    add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_10_add/;
+    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_10_add/;
   } else {
-    add_proto qw/void vp10_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct4x4_16_add sse2/;
+    add_proto qw/void av1_highbd_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct4x4_16_add sse2/;
 
-    add_proto qw/void vp10_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_64_add sse2/;
+    add_proto qw/void av1_highbd_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_64_add sse2/;
 
-    add_proto qw/void vp10_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct8x8_10_add sse2/;
+    add_proto qw/void av1_highbd_idct8x8_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct8x8_10_add sse2/;
 
-    add_proto qw/void vp10_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_256_add sse2/;
+    add_proto qw/void av1_highbd_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_256_add sse2/;
 
-    add_proto qw/void vp10_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
-    specialize qw/vp10_highbd_idct16x16_10_add sse2/;
+    add_proto qw/void av1_highbd_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride, int bd";
+    specialize qw/av1_highbd_idct16x16_10_add sse2/;
   }  # CONFIG_EMULATE_HARDWARE
 } else {
   # Force C versions if CONFIG_EMULATE_HARDWARE is 1
   if (aom_config("CONFIG_EMULATE_HARDWARE") eq "yes") {
-    add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_1_add/;
+    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_1_add/;
 
-    add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_16_add/;
+    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_16_add/;
 
-    add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_1_add/;
+    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_1_add/;
 
-    add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_64_add/;
+    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_64_add/;
 
-    add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_12_add/;
+    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_12_add/;
 
-    add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_1_add/;
+    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_1_add/;
 
-    add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_256_add/;
+    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_256_add/;
 
-    add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_10_add/;
+    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_10_add/;
 
-    add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1024_add/;
+    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1024_add/;
 
-    add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_34_add/;
+    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_34_add/;
 
-    add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1_add/;
+    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1_add/;
 
-    add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_1_add/;
+    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_1_add/;
 
-    add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_16_add/;
+    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_16_add/;
   } else {
-    add_proto qw/void vp10_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_1_add sse2/;
+    add_proto qw/void av1_idct4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_1_add sse2/;
 
-    add_proto qw/void vp10_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct4x4_16_add sse2/;
+    add_proto qw/void av1_idct4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct4x4_16_add sse2/;
 
-    add_proto qw/void vp10_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_1_add sse2/;
+    add_proto qw/void av1_idct8x8_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_1_add sse2/;
 
-    add_proto qw/void vp10_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_64_add sse2/;
+    add_proto qw/void av1_idct8x8_64_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_64_add sse2/;
 
-    add_proto qw/void vp10_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct8x8_12_add sse2/;
+    add_proto qw/void av1_idct8x8_12_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct8x8_12_add sse2/;
 
-    add_proto qw/void vp10_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_1_add sse2/;
+    add_proto qw/void av1_idct16x16_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_1_add sse2/;
 
-    add_proto qw/void vp10_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_256_add sse2/;
+    add_proto qw/void av1_idct16x16_256_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_256_add sse2/;
 
-    add_proto qw/void vp10_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct16x16_10_add sse2/;
+    add_proto qw/void av1_idct16x16_10_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct16x16_10_add sse2/;
 
-    add_proto qw/void vp10_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1024_add sse2/;
+    add_proto qw/void av1_idct32x32_1024_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1024_add sse2/;
 
-    add_proto qw/void vp10_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_34_add sse2/;
+    add_proto qw/void av1_idct32x32_34_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_34_add sse2/;
 
-    add_proto qw/void vp10_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_idct32x32_1_add sse2/;
+    add_proto qw/void av1_idct32x32_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_idct32x32_1_add sse2/;
 
-    add_proto qw/void vp10_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_1_add/;
+    add_proto qw/void av1_iwht4x4_1_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_1_add/;
 
-    add_proto qw/void vp10_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
-    specialize qw/vp10_iwht4x4_16_add/;
+    add_proto qw/void av1_iwht4x4_16_add/, "const tran_low_t *input, uint8_t *dest, int dest_stride";
+    specialize qw/av1_iwht4x4_16_add/;
   }  # CONFIG_EMULATE_HARDWARE
-}  # CONFIG_VPX_HIGHBITDEPTH
+}  # CONFIG_AOM_HIGHBITDEPTH
 
 #
 # Motion search
 #
-add_proto qw/int vp10_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
-specialize qw/vp10_full_search_sad sse3 sse4_1/;
-$vp10_full_search_sad_sse3=vp10_full_search_sadx3;
-$vp10_full_search_sad_sse4_1=vp10_full_search_sadx8;
+add_proto qw/int av1_full_search_sad/, "const struct macroblock *x, const struct mv *ref_mv, int sad_per_bit, int distance, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv, struct mv *best_mv";
+specialize qw/av1_full_search_sad sse3 sse4_1/;
+$av1_full_search_sad_sse3=av1_full_search_sadx3;
+$av1_full_search_sad_sse4_1=av1_full_search_sadx8;
 
-add_proto qw/int vp10_diamond_search_sad/, "const struct macroblock *x, const struct search_site_config *cfg,  struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_diamond_search_sad/;
+add_proto qw/int av1_diamond_search_sad/, "const struct macroblock *x, const struct search_site_config *cfg,  struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_diamond_search_sad/;
 
-add_proto qw/int vp10_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
-specialize qw/vp10_full_range_search/;
+add_proto qw/int av1_full_range_search/, "const struct macroblock *x, const struct search_site_config *cfg, struct mv *ref_mv, struct mv *best_mv, int search_param, int sad_per_bit, int *num00, const struct aom_variance_vtable *fn_ptr, const struct mv *center_mv";
+specialize qw/av1_full_range_search/;
 
-add_proto qw/void vp10_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-specialize qw/vp10_temporal_filter_apply sse2 msa/;
+add_proto qw/void av1_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+specialize qw/av1_temporal_filter_apply sse2 msa/;
 
-if (aom_config("CONFIG_VPX_HIGHBITDEPTH") eq "yes") {
+if (aom_config("CONFIG_AOM_HIGHBITDEPTH") eq "yes") {
 
   # ENCODEMB INVOKE
 
-  add_proto qw/int64_t vp10_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
-  specialize qw/vp10_highbd_block_error sse2/;
+  add_proto qw/int64_t av1_highbd_block_error/, "const tran_low_t *coeff, const tran_low_t *dqcoeff, intptr_t block_size, int64_t *ssz, int bd";
+  specialize qw/av1_highbd_block_error sse2/;
 
   if (aom_config("CONFIG_AOM_QM") eq "yes") {
-    add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+    add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
 
-    add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
+    add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan, const qm_val_t * qm_ptr, const qm_val_t * iqm_ptr";
   } else {
-    add_proto qw/void vp10_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_highbd_quantize_fp/;
+    add_proto qw/void av1_highbd_quantize_fp/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_highbd_quantize_fp/;
 
-    add_proto qw/void vp10_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
-    specialize qw/vp10_highbd_quantize_fp_32x32/;
+    add_proto qw/void av1_highbd_quantize_fp_32x32/, "const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr, const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr, const int16_t *scan, const int16_t *iscan";
+    specialize qw/av1_highbd_quantize_fp_32x32/;
 
   }
 
   # fdct functions
-  add_proto qw/void vp10_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht4x4/;
+  add_proto qw/void av1_highbd_fht4x4/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht4x4/;
 
-  add_proto qw/void vp10_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht8x8/;
+  add_proto qw/void av1_highbd_fht8x8/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht8x8/;
 
-  add_proto qw/void vp10_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
-  specialize qw/vp10_highbd_fht16x16/;
+  add_proto qw/void av1_highbd_fht16x16/, "const int16_t *input, tran_low_t *output, int stride, int tx_type";
+  specialize qw/av1_highbd_fht16x16/;
 
-  add_proto qw/void vp10_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
-  specialize qw/vp10_highbd_fwht4x4/;
+  add_proto qw/void av1_highbd_fwht4x4/, "const int16_t *input, tran_low_t *output, int stride";
+  specialize qw/av1_highbd_fwht4x4/;
 
-  add_proto qw/void vp10_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
-  specialize qw/vp10_highbd_temporal_filter_apply/;
+  add_proto qw/void av1_highbd_temporal_filter_apply/, "uint8_t *frame1, unsigned int stride, uint8_t *frame2, unsigned int block_width, unsigned int block_height, int strength, int filter_weight, unsigned int *accumulator, uint16_t *count";
+  specialize qw/av1_highbd_temporal_filter_apply/;
 
 }
-# End vp10_high encoder functions
+# End av1_high encoder functions
 
 }
 # end encoder functions
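
Editor's note: the add_proto/specialize pairs above feed the RTCD (run-time CPU detection) generator. add_proto declares the C prototype under its new av1_ name, specialize lists the SIMD flavours that may be chosen at run time, and assignments such as $av1_full_search_sad_sse3=av1_full_search_sadx3 bind a specialization to a kernel with a different name. The sketch below only illustrates the kind of dispatch the generated header ends up wiring; the setup function, stubs, and tran_low_t typedef are assumptions, not the exact generated code.

    #include <stdint.h>

    typedef int16_t tran_low_t; /* assumption: matches a non-highbitdepth build */

    /* The _c and _sse2 symbols are the real kernels; empty stubs stand in here. */
    static void av1_fht4x4_c(const int16_t *input, tran_low_t *output,
                             int stride, int tx_type) {
      (void)input; (void)output; (void)stride; (void)tx_type;
    }
    static void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output,
                                int stride, int tx_type) {
      (void)input; (void)output; (void)stride; (void)tx_type;
    }

    /* Callers reach the kernel through a pointer bound to the plain name. */
    static void (*av1_fht4x4)(const int16_t *, tran_low_t *, int, int) =
        av1_fht4x4_c;

    /* from: specialize qw/av1_fht4x4 sse2/ */
    static void setup_rtcd_sketch(int have_sse2) {
      if (have_sse2) av1_fht4x4 = av1_fht4x4_sse2;
    }
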
diff --git a/av1/common/blockd.c b/av1/common/blockd.c
index 1ea443a47dca6c9a8e4c12b73200a1832a6bdaa2..2ebee03cb13268d200fa489b625c1d19362a7191 100644
--- a/av1/common/blockd.c
+++ b/av1/common/blockd.c
@@ -11,7 +11,7 @@
 
 #include "av1/common/blockd.h"
 
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
                                      const MODE_INFO *left_mi, int b) {
   if (b == 0 || b == 2) {
     if (!left_mi || is_inter_block(&left_mi->mbmi)) return DC_PRED;
@@ -23,7 +23,7 @@ PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
   }
 }
 
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
                                       const MODE_INFO *above_mi, int b) {
   if (b == 0 || b == 1) {
     if (!above_mi || is_inter_block(&above_mi->mbmi)) return DC_PRED;
@@ -35,7 +35,7 @@ PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
   }
 }
 
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg) {
   const struct macroblockd_plane *const pd = &xd->plane[plane];
@@ -74,17 +74,17 @@ void vp10_foreach_transformed_block_in_plane(
   }
 }
 
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
                                     BLOCK_SIZE bsize,
                                     foreach_transformed_block_visitor visit,
                                     void *arg) {
   int plane;
 
   for (plane = 0; plane < MAX_MB_PLANE; ++plane)
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
+    av1_foreach_transformed_block_in_plane(xd, bsize, plane, visit, arg);
 }
 
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
                        int aoff, int loff) {
   ENTROPY_CONTEXT *const a = pd->above_context + aoff;
@@ -121,7 +121,7 @@ void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
   }
 }
 
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y) {
   int i;
 
   for (i = 0; i < MAX_MB_PLANE; i++) {
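
Editor's note: the renamed av1_foreach_transformed_block helpers above implement a plain visitor pattern: the caller supplies a callback matching foreach_transformed_block_visitor (declared in blockd.h below) plus an opaque arg, and the helpers walk every transform block of every plane. A minimal caller sketch; the counting visitor is illustrative, not part of the library.

    #include "av1/common/blockd.h"

    /* Visitor: bump the counter carried through the opaque arg pointer. */
    static void count_tx_blocks(int plane, int block, BLOCK_SIZE plane_bsize,
                                TX_SIZE tx_size, void *arg) {
      (void)plane; (void)block; (void)plane_bsize; (void)tx_size;
      ++*(int *)arg;
    }

    /* Count the transform blocks covering a block of the given size. */
    static int num_tx_blocks(const MACROBLOCKD *xd, BLOCK_SIZE bsize) {
      int n = 0;
      av1_foreach_transformed_block(xd, bsize, count_tx_blocks, &n);
      return n;
    }
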
diff --git a/av1/common/blockd.h b/av1/common/blockd.h
index c57827a333f837683db96d7998cf741ae0f92e81..5884182e681b3a206bcc4e295546899db10d9a66 100644
--- a/av1/common/blockd.h
+++ b/av1/common/blockd.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_BLOCKD_H_
-#define VP10_COMMON_BLOCKD_H_
+#ifndef AV1_COMMON_BLOCKD_H_
+#define AV1_COMMON_BLOCKD_H_
 
 #include "./aom_config.h"
 
@@ -110,10 +110,10 @@ static INLINE int has_second_ref(const MB_MODE_INFO *mbmi) {
   return mbmi->ref_frame[1] > INTRA_FRAME;
 }
 
-PREDICTION_MODE vp10_left_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_left_block_mode(const MODE_INFO *cur_mi,
                                      const MODE_INFO *left_mi, int b);
 
-PREDICTION_MODE vp10_above_block_mode(const MODE_INFO *cur_mi,
+PREDICTION_MODE av1_above_block_mode(const MODE_INFO *cur_mi,
                                       const MODE_INFO *above_mi, int b);
 
 enum mv_precision { MV_PRECISION_Q3, MV_PRECISION_Q4 };
@@ -154,7 +154,7 @@ struct macroblockd_plane {
 
 typedef struct RefBuffer {
   // TODO(dkovalev): idx is not really required and should be removed, now it
-  // is used in vp10_onyxd_if.c
+  // is used in av1_onyxd_if.c
   int idx;
   YV12_BUFFER_CONFIG *buf;
   struct scale_factors sf;
@@ -199,7 +199,7 @@ typedef struct macroblockd {
   PARTITION_CONTEXT *above_seg_context;
   PARTITION_CONTEXT left_seg_context[8];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   /* Bit depth: 8, 10, 12 */
   int bd;
 #endif
@@ -241,7 +241,7 @@ static INLINE TX_TYPE get_tx_type(PLANE_TYPE plane_type, const MACROBLOCKD *xd,
   return mbmi->tx_type;
 }
 
-void vp10_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
+void av1_setup_block_planes(MACROBLOCKD *xd, int ss_x, int ss_y);
 
 static INLINE TX_SIZE get_uv_tx_size_impl(TX_SIZE y_tx_size, BLOCK_SIZE bsize,
                                           int xss, int yss) {
@@ -281,16 +281,16 @@ typedef void (*foreach_transformed_block_visitor)(int plane, int block,
                                                   BLOCK_SIZE plane_bsize,
                                                   TX_SIZE tx_size, void *arg);
 
-void vp10_foreach_transformed_block_in_plane(
+void av1_foreach_transformed_block_in_plane(
     const MACROBLOCKD *const xd, BLOCK_SIZE bsize, int plane,
     foreach_transformed_block_visitor visit, void *arg);
 
-void vp10_foreach_transformed_block(const MACROBLOCKD *const xd,
+void av1_foreach_transformed_block(const MACROBLOCKD *const xd,
                                     BLOCK_SIZE bsize,
                                     foreach_transformed_block_visitor visit,
                                     void *arg);
 
-void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
+void av1_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size, int has_eob,
                        int aoff, int loff);
 
@@ -298,4 +298,4 @@ void vp10_set_contexts(const MACROBLOCKD *xd, struct macroblockd_plane *pd,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_BLOCKD_H_
+#endif  // AV1_COMMON_BLOCKD_H_
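
Editor's note: av1_left_block_mode and av1_above_block_mode, declared above, return the intra mode of the neighbouring 4x4 sub-block and fall back to DC_PRED when the neighbour is missing or inter-coded. A small illustrative helper (not in the tree) showing how both are typically queried together:

    #include "av1/common/blockd.h"

    static void get_neighbor_modes(const MODE_INFO *cur_mi,
                                   const MODE_INFO *left_mi,
                                   const MODE_INFO *above_mi, int b,
                                   PREDICTION_MODE *left,
                                   PREDICTION_MODE *above) {
      /* Both helpers return DC_PRED when the neighbour is unavailable. */
      *left = av1_left_block_mode(cur_mi, left_mi, b);
      *above = av1_above_block_mode(cur_mi, above_mi, b);
    }
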
diff --git a/av1/common/clpf.c b/av1/common/clpf.c
index 38b6d8c526e1514a717e6039063ad09cd2362e0c..0399e0fdf2e23afe29ba72bbf6bc3af679e7e460 100644
--- a/av1/common/clpf.c
+++ b/av1/common/clpf.c
@@ -33,8 +33,8 @@ static void clpf_block(const uint8_t *src, uint8_t *dst, int sstride,
 #define BS (MI_SIZE * MI_BLOCK_SIZE)
 
 // Iterate over blocks within a superblock
-static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
-                         const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void av1_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
+                         const AV1_COMMON *cm, MACROBLOCKD *xd,
                          MODE_INFO *const *mi_8x8, int xpos, int ypos) {
   // Temporary buffer (to allow SIMD parallelism)
   uint8_t buf_unaligned[BS * BS + 15];
@@ -61,7 +61,7 @@ static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
           has_bottom &= y != MI_BLOCK_SIZE - 1;
           has_right &= x != MI_BLOCK_SIZE - 1;
 #endif
-          vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+          av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
           clpf_block(
               xd->plane[p].dst.buf, CLPF_ALLOW_PIXEL_PARALLELISM
                                         ? buf + y * MI_SIZE * BS + x * MI_SIZE
@@ -79,7 +79,7 @@ static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
       for (x = 0; x < MI_BLOCK_SIZE && xpos + x < cm->mi_cols; x++) {
         const MB_MODE_INFO *mbmi =
             &mi_8x8[(ypos + y) * cm->mi_stride + xpos + x]->mbmi;
-        vp10_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
+        av1_setup_dst_planes(xd->plane, frame_buffer, ypos + y, xpos + x);
         if (!mbmi->skip) {
           int i = 0;
           for (i = 0; i < (MI_SIZE >> xd->plane[p].subsampling_y); i++)
@@ -94,11 +94,11 @@ static void vp10_clpf_sb(const YV12_BUFFER_CONFIG *frame_buffer,
 }
 
 // Iterate over the superblocks of an entire frame
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
                      MACROBLOCKD *xd) {
   int x, y;
 
   for (y = 0; y < cm->mi_rows; y += MI_BLOCK_SIZE)
     for (x = 0; x < cm->mi_cols; x += MI_BLOCK_SIZE)
-      vp10_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
+      av1_clpf_sb(frame, cm, xd, cm->mi_grid_visible, x, y);
 }
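
Editor's note: the buf_unaligned declaration in av1_clpf_sb above over-allocates its stack buffer by 15 bytes so a 16-byte-aligned working pointer can be carved out of it for the SIMD path. The actual rounding code sits outside the hunk, so the sketch below only illustrates the idiom; the BS value of 64 is an assumption (MI_SIZE * MI_BLOCK_SIZE in this tree).

    #include <stdint.h>

    #define BS_SKETCH 64 /* assumption: MI_SIZE * MI_BLOCK_SIZE == 8 * 8 */

    /* Round a pointer up to the next 16-byte boundary. */
    static uint8_t *align16(uint8_t *p) {
      return (uint8_t *)(((uintptr_t)p + 15) & ~(uintptr_t)15);
    }

    static void clpf_buffer_sketch(void) {
      uint8_t buf_unaligned[BS_SKETCH * BS_SKETCH + 15];
      uint8_t *buf = align16(buf_unaligned); /* aligned working buffer */
      (void)buf;
    }
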
diff --git a/av1/common/clpf.h b/av1/common/clpf.h
index 683b25f68c44e89e82c9ed0db335a0d331856662..d4587f3ebba17c6bd8c5e95e22e71e48d667b240 100644
--- a/av1/common/clpf.h
+++ b/av1/common/clpf.h
@@ -8,8 +8,8 @@
  * Media Patent License 1.0 was not distributed with this source code in the
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
-#ifndef VP10_COMMON_CLPF_H_
-#define VP10_COMMON_CLPF_H_
+#ifndef AV1_COMMON_CLPF_H_
+#define AV1_COMMON_CLPF_H_
 
 #include "av1/common/reconinter.h"
 
@@ -21,7 +21,7 @@
 #define CLPF_FILTER_ALL_PLANES \
   0  // 1 = filter both luma and chroma, 0 = filter only luma
 
-void vp10_clpf_frame(const YV12_BUFFER_CONFIG *frame, const VP10_COMMON *cm,
+void av1_clpf_frame(const YV12_BUFFER_CONFIG *frame, const AV1_COMMON *cm,
                      MACROBLOCKD *xd);
 
 #endif
diff --git a/av1/common/common.h b/av1/common/common.h
index e97e15bed41141012ad8010334e220c2e17373d6..f39f4f836dcf5d17c85a3856033d197ae9035c2d 100644
--- a/av1/common/common.h
+++ b/av1/common/common.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_COMMON_H_
-#define VP10_COMMON_COMMON_H_
+#ifndef AV1_COMMON_COMMON_H_
+#define AV1_COMMON_COMMON_H_
 
 /* Interface header for common constant data structures and lookup tables */
 
@@ -27,21 +27,21 @@ extern "C" {
 #endif
 
 // Only need this for fixed-size arrays, for structs just assign.
-#define vp10_copy(dest, src)             \
+#define av1_copy(dest, src)             \
   {                                      \
     assert(sizeof(dest) == sizeof(src)); \
     memcpy(dest, src, sizeof(src));      \
   }
 
 // Use this for variably-sized arrays.
-#define vp10_copy_array(dest, src, n)      \
+#define av1_copy_array(dest, src, n)      \
   {                                        \
     assert(sizeof(*dest) == sizeof(*src)); \
     memcpy(dest, src, n * sizeof(*src));   \
   }
 
-#define vp10_zero(dest) memset(&(dest), 0, sizeof(dest))
-#define vp10_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
+#define av1_zero(dest) memset(&(dest), 0, sizeof(dest))
+#define av1_zero_array(dest, n) memset(dest, 0, n * sizeof(*dest))
 
 static INLINE int get_unsigned_bits(unsigned int num_values) {
   return num_values > 0 ? get_msb(num_values) + 1 : 0;
@@ -66,9 +66,9 @@ static INLINE int get_unsigned_bits(unsigned int num_values) {
   } while (0)
 #endif
 // TODO(yaowu): validate the usage of these codes or develop new ones.
-#define VP10_SYNC_CODE_0 0x49
-#define VP10_SYNC_CODE_1 0x83
-#define VP10_SYNC_CODE_2 0x43
+#define AV1_SYNC_CODE_0 0x49
+#define AV1_SYNC_CODE_1 0x83
+#define AV1_SYNC_CODE_2 0x43
 
 #define VPX_FRAME_MARKER 0x2
 
@@ -76,4 +76,4 @@ static INLINE int get_unsigned_bits(unsigned int num_values) {
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_COMMON_H_
+#endif  // AV1_COMMON_COMMON_H_
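
Editor's note: the four renamed helpers above split along a simple line: av1_copy and av1_zero rely on sizeof seeing a whole fixed-size array, while av1_copy_array and av1_zero_array take a pointer plus an element count. A short usage sketch with illustrative arrays:

    #include "av1/common/common.h"

    static void copy_helpers_sketch(void) {
      int dst[8], src[8] = { 0 };
      int *dst_ptr = dst;

      av1_copy(dst, src);              /* whole-array copy; sizes must match */
      av1_zero(dst);                   /* memset of the entire array */
      av1_copy_array(dst_ptr, src, 8); /* copy 8 elements through a pointer */
      av1_zero_array(dst_ptr, 8);      /* zero 8 elements through a pointer */
    }
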
diff --git a/av1/common/common_data.h b/av1/common/common_data.h
index d0885f14009da5dde1f043f224c1cdfc27bedb6d..a1db52a558055ff771728e01b50c184e459c1cf1 100644
--- a/av1/common/common_data.h
+++ b/av1/common/common_data.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_COMMON_DATA_H_
-#define VP10_COMMON_COMMON_DATA_H_
+#ifndef AV1_COMMON_COMMON_DATA_H_
+#define AV1_COMMON_COMMON_DATA_H_
 
 #include "av1/common/enums.h"
 #include "aom/aom_integer.h"
@@ -175,4 +175,4 @@ static const struct {
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_COMMON_DATA_H_
+#endif  // AV1_COMMON_COMMON_DATA_H_
diff --git a/av1/common/debugmodes.c b/av1/common/debugmodes.c
index 0052fc4b11c3c7508af2c7a5d17b2dfd265d451e..d7b31c1e4cb295042d8d1e43ce8993d35f9ff65d 100644
--- a/av1/common/debugmodes.c
+++ b/av1/common/debugmodes.c
@@ -14,7 +14,7 @@
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
 
-static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) {
+static void log_frame_info(AV1_COMMON *cm, const char *str, FILE *f) {
   fprintf(f, "%s", str);
   fprintf(f, "(Frame %d, Show:%d, Q:%d): \n", cm->current_video_frame,
           cm->show_frame, cm->base_qindex);
@@ -23,7 +23,7 @@ static void log_frame_info(VP10_COMMON *cm, const char *str, FILE *f) {
  * and uses the passed in member offset to print out the value of an integer
  * for each mbmi member value in the mi structure.
  */
-static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
+static void print_mi_data(AV1_COMMON *cm, FILE *file, const char *descriptor,
                           size_t member_offset) {
   int mi_row, mi_col;
   MODE_INFO **mi = cm->mi_grid_visible;
@@ -44,7 +44,7 @@ static void print_mi_data(VP10_COMMON *cm, FILE *file, const char *descriptor,
   fprintf(file, "\n");
 }
 
-void vp10_print_modes_and_motion_vectors(VP10_COMMON *cm, const char *file) {
+void av1_print_modes_and_motion_vectors(AV1_COMMON *cm, const char *file) {
   int mi_row;
   int mi_col;
   FILE *mvs = fopen(file, "a");
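
Editor's note: print_mi_data above takes a size_t member_offset so one generic routine can dump any small integer field of the per-block MB_MODE_INFO; callers pass something like offsetof(MB_MODE_INFO, skip). The helper below is an illustrative reconstruction of that idiom, not code from the tree, and assumes the field being read is char-sized.

    #include <stddef.h>
    #include "av1/common/blockd.h"

    /* Read one char-sized mbmi field selected by its byte offset. */
    static int read_mbmi_char_field(const MODE_INFO *mi, size_t member_offset) {
      return *((const char *)&mi->mbmi + member_offset);
    }

    /* e.g. read_mbmi_char_field(mi, offsetof(MB_MODE_INFO, skip)) */
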
diff --git a/av1/common/dering.c b/av1/common/dering.c
index 948e77eaf9cc302b149c31d62aaa9e59db151f5f..0717e68079850d8540099349ddecdc7cdb97241a 100644
--- a/av1/common/dering.c
+++ b/av1/common/dering.c
@@ -28,7 +28,7 @@ int compute_level_from_index(int global_level, int gi) {
   return clamp(level, gi, MAX_DERING_LEVEL-1);
 }
 
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col) {
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col) {
   int r, c;
   int maxc, maxr;
   int skip = 1;
@@ -46,7 +46,7 @@ int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col) {
   return skip;
 }
 
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                        MACROBLOCKD *xd, int global_level) {
   int r, c;
   int sbr, sbc;
@@ -62,7 +62,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
   nvsb = (cm->mi_rows + MI_BLOCK_SIZE - 1)/MI_BLOCK_SIZE;
   nhsb = (cm->mi_cols + MI_BLOCK_SIZE - 1)/MI_BLOCK_SIZE;
   bskip = aom_malloc(sizeof(*bskip)*cm->mi_rows*cm->mi_cols);
-  vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+  av1_setup_dst_planes(xd->plane, frame, 0, 0);
   for (pli = 0; pli < 3; pli++) {
     dec[pli] = xd->plane[pli].subsampling_x;
     bsize[pli] = 8 >> dec[pli];
@@ -72,7 +72,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
     src[pli] = aom_malloc(sizeof(*src)*cm->mi_rows*cm->mi_cols*64);
     for (r = 0; r < bsize[pli]*cm->mi_rows; ++r) {
       for (c = 0; c < bsize[pli]*cm->mi_cols; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
           src[pli][r * stride + c] =
               CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
@@ -81,7 +81,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
 #endif
           src[pli][r * stride + c] =
               xd->plane[pli].dst.buf[r * xd->plane[pli].dst.stride + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         }
 #endif
       }
@@ -127,7 +127,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
             cm->mi_cols, threshold, OD_DERING_NO_CHECK_OVERLAP, coeff_shift);
         for (r = 0; r < bsize[pli]*nvb; ++r) {
           for (c = 0; c < bsize[pli]*nhb; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             if (cm->use_highbitdepth) {
               CONVERT_TO_SHORTPTR(xd->plane[pli].dst.buf)
                   [xd->plane[pli].dst.stride*(bsize[pli]*MI_BLOCK_SIZE*sbr + r)
@@ -139,7 +139,7 @@ void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
                   (bsize[pli]*MI_BLOCK_SIZE*sbr + r) +
                   sbc*bsize[pli]*MI_BLOCK_SIZE + c] =
                   dst[r * MI_BLOCK_SIZE * bsize[pli] + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             }
 #endif
           }
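
Editor's note: the repeated CONFIG_AOM_HIGHBITDEPTH blocks in av1_dering_frame above all follow one pattern: when the frame is high bit depth, samples live behind CONVERT_TO_SHORTPTR as 16-bit values, otherwise they are plain bytes at the same buf/stride. A minimal sketch of that read path, assuming blockd.h pulls in the dsp header that defines CONVERT_TO_SHORTPTR; the helper name is illustrative.

    #include "./aom_config.h"
    #include "av1/common/blockd.h"

    static int16_t read_recon_sample(const struct macroblockd_plane *pd,
                                     int use_highbitdepth, int r, int c) {
    #if CONFIG_AOM_HIGHBITDEPTH
      if (use_highbitdepth) /* 16-bit samples behind a converted pointer */
        return (int16_t)CONVERT_TO_SHORTPTR(pd->dst.buf)[r * pd->dst.stride + c];
    #else
      (void)use_highbitdepth;
    #endif
      return pd->dst.buf[r * pd->dst.stride + c]; /* 8-bit path */
    }
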
diff --git a/av1/common/dering.h b/av1/common/dering.h
index 98a6f93302eb6a560325f62c65f6a302538aa425..a46e2074d6b97b0b4155dbb22c7f0dfce171108d 100644
--- a/av1/common/dering.h
+++ b/av1/common/dering.h
@@ -8,8 +8,8 @@
  * Media Patent License 1.0 was not distributed with this source code in the
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
-#ifndef VP10_COMMON_DERING_H_
-#define VP10_COMMON_DERING_H_
+#ifndef AV1_COMMON_DERING_H_
+#define AV1_COMMON_DERING_H_
 
 #include "av1/common/od_dering.h"
 #include "av1/common/onyxc_int.h"
@@ -29,15 +29,15 @@ extern "C" {
 #define DERING_REFINEMENT_LEVELS 4
 
 int compute_level_from_index(int global_level, int gi);
-int sb_all_skip(const VP10_COMMON *const cm, int mi_row, int mi_col);
-void vp10_dering_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+int sb_all_skip(const AV1_COMMON *const cm, int mi_row, int mi_col);
+void av1_dering_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                        MACROBLOCKD *xd, int global_level);
 
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
-                      VP10_COMMON *cm,
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+                      AV1_COMMON *cm,
                       MACROBLOCKD *xd);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
-#endif  // VP10_COMMON_DERING_H_
+#endif  // AV1_COMMON_DERING_H_
diff --git a/av1/common/entropy.c b/av1/common/entropy.c
index f013376993f536bd4da1778a575079f512a15be5..d433bfb14fa500119a51dda8fad77b8b82ecad71 100644
--- a/av1/common/entropy.c
+++ b/av1/common/entropy.c
@@ -17,7 +17,7 @@
 #include "aom/aom_integer.h"
 
 // Unconstrained Node Tree
-const aom_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
   2,
   6,  // 0 = LOW_VAL
   -TWO_TOKEN,
@@ -36,33 +36,33 @@ const aom_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
   -CATEGORY6_TOKEN  // 7 = CAT_FIVE
 };
 
-const aom_prob vp10_cat1_prob[] = { 159 };
-const aom_prob vp10_cat2_prob[] = { 165, 145 };
-const aom_prob vp10_cat3_prob[] = { 173, 148, 140 };
-const aom_prob vp10_cat4_prob[] = { 176, 155, 140, 135 };
-const aom_prob vp10_cat5_prob[] = { 180, 157, 141, 134, 130 };
-const aom_prob vp10_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
+const aom_prob av1_cat1_prob[] = { 159 };
+const aom_prob av1_cat2_prob[] = { 165, 145 };
+const aom_prob av1_cat3_prob[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob[] = { 254, 254, 254, 252, 249, 243, 230,
                                     196, 177, 153, 140, 133, 130, 129 };
-#if CONFIG_VPX_HIGHBITDEPTH
-const aom_prob vp10_cat1_prob_high10[] = { 159 };
-const aom_prob vp10_cat2_prob_high10[] = { 165, 145 };
-const aom_prob vp10_cat3_prob_high10[] = { 173, 148, 140 };
-const aom_prob vp10_cat4_prob_high10[] = { 176, 155, 140, 135 };
-const aom_prob vp10_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
-const aom_prob vp10_cat6_prob_high10[] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const aom_prob av1_cat1_prob_high10[] = { 159 };
+const aom_prob av1_cat2_prob_high10[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high10[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high10[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high10[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high10[] = {
   255, 255, 254, 254, 254, 252, 249, 243, 230, 196, 177, 153, 140, 133, 130, 129
 };
-const aom_prob vp10_cat1_prob_high12[] = { 159 };
-const aom_prob vp10_cat2_prob_high12[] = { 165, 145 };
-const aom_prob vp10_cat3_prob_high12[] = { 173, 148, 140 };
-const aom_prob vp10_cat4_prob_high12[] = { 176, 155, 140, 135 };
-const aom_prob vp10_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
-const aom_prob vp10_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
+const aom_prob av1_cat1_prob_high12[] = { 159 };
+const aom_prob av1_cat2_prob_high12[] = { 165, 145 };
+const aom_prob av1_cat3_prob_high12[] = { 173, 148, 140 };
+const aom_prob av1_cat4_prob_high12[] = { 176, 155, 140, 135 };
+const aom_prob av1_cat5_prob_high12[] = { 180, 157, 141, 134, 130 };
+const aom_prob av1_cat6_prob_high12[] = { 255, 255, 255, 255, 254, 254,
                                            254, 252, 249, 243, 230, 196,
                                            177, 153, 140, 133, 130, 129 };
 #endif
 
-const uint8_t vp10_coefband_trans_8x8plus[1024] = {
+const uint8_t av1_coefband_trans_8x8plus[1024] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5,
   // beyond MAXBAND_INDEX+1 all values are filled as 5
   5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
@@ -106,11 +106,11 @@ const uint8_t vp10_coefband_trans_8x8plus[1024] = {
   5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 };
 
-const uint8_t vp10_coefband_trans_4x4[16] = {
+const uint8_t av1_coefband_trans_4x4[16] = {
   0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5,
 };
 
-const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
+const uint8_t av1_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
                                                        4, 5, 5, 5, 5, 5 };
 
 // Model obtained from a 2-sided zero-centered distribution derived
@@ -125,9 +125,9 @@ const uint8_t vp10_pt_energy_class[ENTROPY_TOKENS] = { 0, 1, 2, 3, 3, 4,
 
 // Every odd line in this table can be generated from the even lines
 // by averaging:
-// vp10_pareto8_full[l][node] = (vp10_pareto8_full[l-1][node] +
-//                              vp10_pareto8_full[l+1][node] ) >> 1;
-const aom_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
+// av1_pareto8_full[l][node] = (av1_pareto8_full[l-1][node] +
+//                              av1_pareto8_full[l+1][node] ) >> 1;
+const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
   { 3, 86, 128, 6, 86, 23, 88, 29 },
   { 6, 86, 128, 11, 87, 42, 91, 52 },
   { 9, 86, 129, 17, 88, 61, 94, 76 },
@@ -385,7 +385,7 @@ const aom_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES] = {
   { 255, 246, 247, 255, 239, 255, 253, 255 },
 };
 
-static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
   {     // Y plane
     {   // Intra
       { // Band 0
@@ -550,7 +550,7 @@ static const vp10_coeff_probs_model default_coef_probs_4x4[PLANE_TYPES] = {
         { 8, 23, 61 } } } }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
   {     // Y plane
     {   // Intra
       { // Band 0
@@ -715,7 +715,7 @@ static const vp10_coeff_probs_model default_coef_probs_8x8[PLANE_TYPES] = {
         { 1, 23, 41 } } } }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
   {     // Y plane
     {   // Intra
       { // Band 0
@@ -880,7 +880,7 @@ static const vp10_coeff_probs_model default_coef_probs_16x16[PLANE_TYPES] = {
         { 1, 17, 31 } } } }
 };
 
-static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
+static const av1_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
   {     // Y plane
     {   // Intra
       { // Band 0
@@ -1048,21 +1048,21 @@ static const vp10_coeff_probs_model default_coef_probs_32x32[PLANE_TYPES] = {
 static void extend_to_full_distribution(aom_prob *probs, aom_prob p) {
   // TODO(aconverse): model[PIVOT_NODE] should never be zero.
   // https://code.google.com/p/webm/issues/detail?id=1089
-  memcpy(probs, vp10_pareto8_full[p == 0 ? 254 : p - 1],
+  memcpy(probs, av1_pareto8_full[p == 0 ? 254 : p - 1],
          MODEL_NODES * sizeof(aom_prob));
 }
 
-void vp10_model_to_full_probs(const aom_prob *model, aom_prob *full) {
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full) {
   if (full != model)
     memcpy(full, model, sizeof(aom_prob) * UNCONSTRAINED_NODES);
   extend_to_full_distribution(&full[UNCONSTRAINED_NODES], model[PIVOT_NODE]);
 }
 
-void vp10_default_coef_probs(VP10_COMMON *cm) {
-  vp10_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
-  vp10_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
-  vp10_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
-  vp10_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
+void av1_default_coef_probs(AV1_COMMON *cm) {
+  av1_copy(cm->fc->coef_probs[TX_4X4], default_coef_probs_4x4);
+  av1_copy(cm->fc->coef_probs[TX_8X8], default_coef_probs_8x8);
+  av1_copy(cm->fc->coef_probs[TX_16X16], default_coef_probs_16x16);
+  av1_copy(cm->fc->coef_probs[TX_32X32], default_coef_probs_32x32);
 }
 
 #define COEF_COUNT_SAT 24
@@ -1072,13 +1072,13 @@ void vp10_default_coef_probs(VP10_COMMON *cm) {
 #define COEF_COUNT_SAT_AFTER_KEY 24
 #define COEF_MAX_UPDATE_FACTOR_AFTER_KEY 128
 
-static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
+static void adapt_coef_probs(AV1_COMMON *cm, TX_SIZE tx_size,
                              unsigned int count_sat,
                              unsigned int update_factor) {
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
-  vp10_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
-  const vp10_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
-  vp10_coeff_count_model *counts = cm->counts.coef[tx_size];
+  av1_coeff_probs_model *const probs = cm->fc->coef_probs[tx_size];
+  const av1_coeff_probs_model *const pre_probs = pre_fc->coef_probs[tx_size];
+  av1_coeff_count_model *counts = cm->counts.coef[tx_size];
   unsigned int(*eob_counts)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       cm->counts.eob_branch[tx_size];
   int i, j, k, l, m;
@@ -1101,7 +1101,7 @@ static void adapt_coef_probs(VP10_COMMON *cm, TX_SIZE tx_size,
         }
 }
 
-void vp10_adapt_coef_probs(VP10_COMMON *cm) {
+void av1_adapt_coef_probs(AV1_COMMON *cm) {
   TX_SIZE t;
   unsigned int count_sat, update_factor;
 
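
Editor's note: the comment above av1_pareto8_full states that every odd row can be regenerated by averaging its two even neighbours, which is what keeps the stored coefficient model compact; extend_to_full_distribution/av1_model_to_full_probs then expand the pivot probability into the remaining MODEL_NODES entries. Below is a unit-test style sketch of the averaging property, assuming entropy.h exposes the table and the COEFF_PROB_MODELS/MODEL_NODES constants.

    #include <assert.h>
    #include "av1/common/entropy.h"

    static void check_pareto_interpolation(void) {
      int l, node;
      /* Odd rows should equal the rounded-down mean of their even neighbours. */
      for (l = 1; l + 1 < COEFF_PROB_MODELS; l += 2)
        for (node = 0; node < MODEL_NODES; ++node)
          assert(av1_pareto8_full[l][node] ==
                 ((av1_pareto8_full[l - 1][node] +
                   av1_pareto8_full[l + 1][node]) >> 1));
    }
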
diff --git a/av1/common/entropy.h b/av1/common/entropy.h
index 21a09870247a57490f9c73987b2d2353dee54463..fdfccf3886562762778bbae47c679614b1136962 100644
--- a/av1/common/entropy.h
+++ b/av1/common/entropy.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_ENTROPY_H_
-#define VP10_COMMON_ENTROPY_H_
+#ifndef AV1_COMMON_ENTROPY_H_
+#define AV1_COMMON_ENTROPY_H_
 
 #include "aom/aom_integer.h"
 #include "aom_dsp/prob.h"
@@ -43,7 +43,7 @@ extern "C" {
 
 #define ENTROPY_NODES 11
 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_pt_energy_class[ENTROPY_TOKENS]);
 
 #define CAT1_MIN_VAL 5
 #define CAT2_MIN_VAL 7
@@ -53,27 +53,27 @@ DECLARE_ALIGNED(16, extern const uint8_t, vp10_pt_energy_class[ENTROPY_TOKENS]);
 #define CAT6_MIN_VAL 67
 
 // Extra bit probabilities.
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob[14]);
-
-#if CONFIG_VPX_HIGHBITDEPTH
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high10[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high10[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high10[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high10[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high10[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high10[16]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat1_prob_high12[1]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat2_prob_high12[2]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat3_prob_high12[3]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat4_prob_high12[4]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat5_prob_high12[5]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_cat6_prob_high12[18]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob[14]);
+
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high10[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high10[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high10[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high10[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high10[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high10[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat1_prob_high12[1]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat2_prob_high12[2]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat3_prob_high12[3]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat4_prob_high12[4]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat5_prob_high12[5]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_cat6_prob_high12[18]);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define EOB_MODEL_TOKEN 3
 
@@ -83,20 +83,20 @@ typedef struct {
   int len;
   int base_val;
   const int16_t *cost;
-} vp10_extra_bit;
+} av1_extra_bit;
 
 // indexed by token value
-extern const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS];
-#if CONFIG_VPX_HIGHBITDEPTH
-extern const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS];
-extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS];
+#if CONFIG_AOM_HIGHBITDEPTH
+extern const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS];
+extern const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS];
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #define DCT_MAX_VALUE 16384
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_MAX_VALUE_HIGH10 65536
 #define DCT_MAX_VALUE_HIGH12 262144
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 /* Coefficients are predicted via a 3-dimensional probability table. */
 
@@ -127,28 +127,28 @@ extern const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS];
 // #define ENTROPY_STATS
 
 typedef unsigned int
-    vp10_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
+    av1_coeff_count[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_TOKENS];
 typedef unsigned int
-    vp10_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
+    av1_coeff_stats[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][ENTROPY_NODES][2];
 
 #define SUBEXP_PARAM 4   /* Subexponential code parameter */
 #define MODULUS_PARAM 13 /* Modulus parameter */
 
-struct VP10Common;
-void vp10_default_coef_probs(struct VP10Common *cm);
-void vp10_adapt_coef_probs(struct VP10Common *cm);
+struct AV1Common;
+void av1_default_coef_probs(struct AV1Common *cm);
+void av1_adapt_coef_probs(struct AV1Common *cm);
 
 // This is the index in the scan order beyond which all coefficients for
 // 8x8 transform and above are in the top band.
 // This macro is currently unused but may be used by certain implementations
 #define MAXBAND_INDEX 21
 
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_8x8plus[1024]);
-DECLARE_ALIGNED(16, extern const uint8_t, vp10_coefband_trans_4x4[16]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_8x8plus[1024]);
+DECLARE_ALIGNED(16, extern const uint8_t, av1_coefband_trans_4x4[16]);
 
 static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
-  return tx_size == TX_4X4 ? vp10_coefband_trans_4x4
-                           : vp10_coefband_trans_8x8plus;
+  return tx_size == TX_4X4 ? av1_coefband_trans_4x4
+                           : av1_coefband_trans_8x8plus;
 }
 
 // 128 lists of probabilities are stored for the following ONE node probs:
@@ -162,16 +162,16 @@ static INLINE const uint8_t *get_band_translate(TX_SIZE tx_size) {
 #define PIVOT_NODE 2  // which node is pivot
 
 #define MODEL_NODES (ENTROPY_NODES - UNCONSTRAINED_NODES)
-extern const aom_tree_index vp10_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
-extern const aom_prob vp10_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
+extern const aom_tree_index av1_coef_con_tree[TREE_SIZE(ENTROPY_TOKENS)];
+extern const aom_prob av1_pareto8_full[COEFF_PROB_MODELS][MODEL_NODES];
 
-typedef aom_prob vp10_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
+typedef aom_prob av1_coeff_probs_model[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS]
                                        [UNCONSTRAINED_NODES];
 
-typedef unsigned int vp10_coeff_count_model
+typedef unsigned int av1_coeff_count_model
     [REF_TYPES][COEF_BANDS][COEFF_CONTEXTS][UNCONSTRAINED_NODES + 1];
 
-void vp10_model_to_full_probs(const aom_prob *model, aom_prob *full);
+void av1_model_to_full_probs(const aom_prob *model, aom_prob *full);
 
 typedef char ENTROPY_CONTEXT;
 
@@ -211,4 +211,4 @@ static INLINE int get_entropy_context(TX_SIZE tx_size, const ENTROPY_CONTEXT *a,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPY_H_
+#endif  // AV1_COMMON_ENTROPY_H_
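
The header above organizes coefficient probabilities by reference type, band, and context, with get_band_translate() mapping a coefficient's scan position to its band. The sketch below shows that lookup chain with a local stand-in table; the table values follow the VP9 lineage and are illustrative, not copied from entropy.c:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for av1_coefband_trans_4x4: maps the scan position of
 * a 4x4 transform coefficient to one of the coefficient bands. */
static const uint8_t coefband_trans_4x4_example[16] = {
  0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5
};

int main(void) {
  /* For 8x8 and larger transforms the 1024-entry table plays the same role,
   * and every scan position above MAXBAND_INDEX (21) lands in the top band. */
  for (int pos = 0; pos < 16; ++pos) {
    const int band = coefband_trans_4x4_example[pos];
    /* The band, together with the reference type and coefficient context,
     * selects a row of av1_coeff_probs_model: coef_probs[ref][band][ctx][node]. */
    printf("scan position %2d -> band %d\n", pos, band);
  }
  return 0;
}
```
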
diff --git a/av1/common/entropymode.c b/av1/common/entropymode.c
index 0753288f9d34c360b8019121813a0a0eafe6a88e..97c542e763ac5f07013b02ee2904e98dd9243fa5 100644
--- a/av1/common/entropymode.c
+++ b/av1/common/entropymode.c
@@ -14,7 +14,7 @@
 #include "av1/common/onyxc_int.h"
 #include "av1/common/seg_common.h"
 
-const aom_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
+const aom_prob av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
     { {
           // above = dc
           { 137, 30, 42, 148, 151, 207, 70, 52, 91 },   // left = dc
@@ -147,7 +147,7 @@ const aom_prob vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1] =
       } };
 
 #if !CONFIG_MISC_FIXES
-const aom_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
+const aom_prob av1_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1] = {
   { 144, 11, 54, 157, 195, 130, 46, 58, 108 },   // y = dc
   { 118, 15, 123, 148, 131, 101, 44, 93, 131 },  // y = v
   { 113, 12, 23, 188, 226, 142, 26, 32, 125 },   // y = h
@@ -183,7 +183,7 @@ static const aom_prob default_uv_probs[INTRA_MODES][INTRA_MODES - 1] = {
 
 #if !CONFIG_MISC_FIXES
 const aom_prob
-    vp10_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
+    av1_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1] = {
       // 8x8 -> 4x4
       { 158, 97, 94 },  // a/l both not split
       { 93, 24, 99 },   // a split, l not split
@@ -243,7 +243,7 @@ static const aom_prob
     };
 
 /* Array indices are identical to previously-existing INTRAMODECONTEXTNODES. */
-const aom_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
+const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
   -DC_PRED,   2,          /* 0 = DC_NODE */
   -TM_PRED,   4,          /* 1 = TM_NODE */
   -V_PRED,    6,          /* 2 = V_NODE */
@@ -255,12 +255,12 @@ const aom_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)] = {
   -D153_PRED, -D207_PRED  /* 8 = D153_NODE */
 };
 
-const aom_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
+const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)] = {
   -INTER_OFFSET(ZEROMV), 2, -INTER_OFFSET(NEARESTMV), 4, -INTER_OFFSET(NEARMV),
   -INTER_OFFSET(NEWMV)
 };
 
-const aom_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
+const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)] = {
   -PARTITION_NONE, 2, -PARTITION_HORZ, 4, -PARTITION_VERT, -PARTITION_SPLIT
 };
 
@@ -286,7 +286,7 @@ static const struct tx_probs default_tx_probs = { { { 3, 136, 37 },
 
                                                   { { 100 }, { 66 } } };
 
-void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
+void av1_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
                                            unsigned int (*ct_32x32p)[2]) {
   ct_32x32p[0][0] = tx_count_32x32p[TX_4X4];
   ct_32x32p[0][1] = tx_count_32x32p[TX_8X8] + tx_count_32x32p[TX_16X16] +
@@ -297,7 +297,7 @@ void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
   ct_32x32p[2][1] = tx_count_32x32p[TX_32X32];
 }
 
-void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
+void av1_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
                                            unsigned int (*ct_16x16p)[2]) {
   ct_16x16p[0][0] = tx_count_16x16p[TX_4X4];
   ct_16x16p[0][1] = tx_count_16x16p[TX_8X8] + tx_count_16x16p[TX_16X16];
@@ -305,7 +305,7 @@ void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
   ct_16x16p[1][1] = tx_count_16x16p[TX_16X16];
 }
 
-void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
+void av1_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
                                          unsigned int (*ct_8x8p)[2]) {
   ct_8x8p[0][0] = tx_count_8x8p[TX_4X4];
   ct_8x8p[0][1] = tx_count_8x8p[TX_8X8];
@@ -325,7 +325,7 @@ static const struct segmentation_probs default_seg_probs = {
 };
 #endif
 
-const aom_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
+const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)] = {
   -DCT_DCT, 2, -ADST_ADST, 4, -ADST_DCT, -DCT_ADST
 };
 
@@ -341,29 +341,29 @@ static const aom_prob default_inter_ext_tx_prob[EXT_TX_SIZES][TX_TYPES - 1] = {
 };
 
 static void init_mode_probs(FRAME_CONTEXT *fc) {
-  vp10_copy(fc->uv_mode_prob, default_uv_probs);
-  vp10_copy(fc->y_mode_prob, default_if_y_probs);
-  vp10_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
-  vp10_copy(fc->partition_prob, default_partition_probs);
-  vp10_copy(fc->intra_inter_prob, default_intra_inter_p);
-  vp10_copy(fc->comp_inter_prob, default_comp_inter_p);
-  vp10_copy(fc->comp_ref_prob, default_comp_ref_p);
-  vp10_copy(fc->single_ref_prob, default_single_ref_p);
+  av1_copy(fc->uv_mode_prob, default_uv_probs);
+  av1_copy(fc->y_mode_prob, default_if_y_probs);
+  av1_copy(fc->switchable_interp_prob, default_switchable_interp_prob);
+  av1_copy(fc->partition_prob, default_partition_probs);
+  av1_copy(fc->intra_inter_prob, default_intra_inter_p);
+  av1_copy(fc->comp_inter_prob, default_comp_inter_p);
+  av1_copy(fc->comp_ref_prob, default_comp_ref_p);
+  av1_copy(fc->single_ref_prob, default_single_ref_p);
   fc->tx_probs = default_tx_probs;
-  vp10_copy(fc->skip_probs, default_skip_probs);
-  vp10_copy(fc->inter_mode_probs, default_inter_mode_probs);
+  av1_copy(fc->skip_probs, default_skip_probs);
+  av1_copy(fc->inter_mode_probs, default_inter_mode_probs);
 #if CONFIG_MISC_FIXES
-  vp10_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
-  vp10_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
+  av1_copy(fc->seg.tree_probs, default_seg_probs.tree_probs);
+  av1_copy(fc->seg.pred_probs, default_seg_probs.pred_probs);
 #endif
-  vp10_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
-  vp10_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
+  av1_copy(fc->intra_ext_tx_prob, default_intra_ext_tx_prob);
+  av1_copy(fc->inter_ext_tx_prob, default_inter_ext_tx_prob);
 }
 
-const aom_tree_index vp10_switchable_interp_tree[TREE_SIZE(
+const aom_tree_index av1_switchable_interp_tree[TREE_SIZE(
     SWITCHABLE_FILTERS)] = { -EIGHTTAP, 2, -EIGHTTAP_SMOOTH, -EIGHTTAP_SHARP };
 
-void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_inter_frame_probs(AV1_COMMON *cm) {
   int i, j;
   FRAME_CONTEXT *fc = cm->fc;
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -384,32 +384,32 @@ void vp10_adapt_inter_frame_probs(VP10_COMMON *cm) {
           pre_fc->single_ref_prob[i][j], counts->single_ref[i][j]);
 
   for (i = 0; i < INTER_MODE_CONTEXTS; i++)
-    aom_tree_merge_probs(vp10_inter_mode_tree, pre_fc->inter_mode_probs[i],
+    aom_tree_merge_probs(av1_inter_mode_tree, pre_fc->inter_mode_probs[i],
                          counts->inter_mode[i], fc->inter_mode_probs[i]);
 
   for (i = 0; i < BLOCK_SIZE_GROUPS; i++)
-    aom_tree_merge_probs(vp10_intra_mode_tree, pre_fc->y_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->y_mode_prob[i],
                          counts->y_mode[i], fc->y_mode_prob[i]);
 
 #if !CONFIG_MISC_FIXES
   for (i = 0; i < INTRA_MODES; ++i)
-    aom_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
                          counts->uv_mode[i], fc->uv_mode_prob[i]);
 
   for (i = 0; i < PARTITION_CONTEXTS; i++)
-    aom_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+    aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
                          counts->partition[i], fc->partition_prob[i]);
 #endif
 
   if (cm->interp_filter == SWITCHABLE) {
     for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; i++)
       aom_tree_merge_probs(
-          vp10_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
+          av1_switchable_interp_tree, pre_fc->switchable_interp_prob[i],
           counts->switchable_interp[i], fc->switchable_interp_prob[i]);
   }
 }
 
-void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
+void av1_adapt_intra_frame_probs(AV1_COMMON *cm) {
   int i;
   FRAME_CONTEXT *fc = cm->fc;
   const FRAME_CONTEXT *pre_fc = &cm->frame_contexts[cm->frame_context_idx];
@@ -422,18 +422,18 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
     unsigned int branch_ct_32x32p[TX_SIZES - 1][2];
 
     for (i = 0; i < TX_SIZE_CONTEXTS; ++i) {
-      vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
+      av1_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], branch_ct_8x8p);
       for (j = 0; j < TX_SIZES - 3; ++j)
         fc->tx_probs.p8x8[i][j] =
             mode_mv_merge_probs(pre_fc->tx_probs.p8x8[i][j], branch_ct_8x8p[j]);
 
-      vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i],
+      av1_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i],
                                             branch_ct_16x16p);
       for (j = 0; j < TX_SIZES - 2; ++j)
         fc->tx_probs.p16x16[i][j] = mode_mv_merge_probs(
             pre_fc->tx_probs.p16x16[i][j], branch_ct_16x16p[j]);
 
-      vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i],
+      av1_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i],
                                             branch_ct_32x32p);
       for (j = 0; j < TX_SIZES - 1; ++j)
         fc->tx_probs.p32x32[i][j] = mode_mv_merge_probs(
@@ -448,12 +448,12 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     int j;
     for (j = 0; j < TX_TYPES; ++j)
-      aom_tree_merge_probs(vp10_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
+      aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->intra_ext_tx_prob[i][j],
                            counts->intra_ext_tx[i][j],
                            fc->intra_ext_tx_prob[i][j]);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    aom_tree_merge_probs(vp10_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
+    aom_tree_merge_probs(av1_ext_tx_tree, pre_fc->inter_ext_tx_prob[i],
                          counts->inter_ext_tx[i], fc->inter_ext_tx_prob[i]);
   }
 
@@ -463,19 +463,19 @@ void vp10_adapt_intra_frame_probs(VP10_COMMON *cm) {
       fc->seg.pred_probs[i] =
           mode_mv_merge_probs(pre_fc->seg.pred_probs[i], counts->seg.pred[i]);
 
-    aom_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+    aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_mispred, fc->seg.tree_probs);
   } else {
-    aom_tree_merge_probs(vp10_segment_tree, pre_fc->seg.tree_probs,
+    aom_tree_merge_probs(av1_segment_tree, pre_fc->seg.tree_probs,
                          counts->seg.tree_total, fc->seg.tree_probs);
   }
 
   for (i = 0; i < INTRA_MODES; ++i)
-    aom_tree_merge_probs(vp10_intra_mode_tree, pre_fc->uv_mode_prob[i],
+    aom_tree_merge_probs(av1_intra_mode_tree, pre_fc->uv_mode_prob[i],
                          counts->uv_mode[i], fc->uv_mode_prob[i]);
 
   for (i = 0; i < PARTITION_CONTEXTS; i++)
-    aom_tree_merge_probs(vp10_partition_tree, pre_fc->partition_prob[i],
+    aom_tree_merge_probs(av1_partition_tree, pre_fc->partition_prob[i],
                          counts->partition[i], fc->partition_prob[i]);
 #endif
 }
@@ -493,13 +493,13 @@ static void set_default_lf_deltas(struct loopfilter *lf) {
   lf->mode_deltas[1] = 0;
 }
 
-void vp10_setup_past_independence(VP10_COMMON *cm) {
+void av1_setup_past_independence(AV1_COMMON *cm) {
   // Reset the segment feature data to the default stats:
   // Features disabled, 0, with delta coding (Default state).
   struct loopfilter *const lf = &cm->lf;
 
   int i;
-  vp10_clearall_segfeatures(&cm->seg);
+  av1_clearall_segfeatures(&cm->seg);
   cm->seg.abs_delta = SEGMENT_DELTADATA;
 
   if (cm->last_frame_seg_map && !cm->frame_parallel_decode)
@@ -509,16 +509,16 @@ void vp10_setup_past_independence(VP10_COMMON *cm) {
     memset(cm->current_frame_seg_map, 0, (cm->mi_rows * cm->mi_cols));
 
   // Reset the mode ref deltas for loop filter
-  vp10_zero(lf->last_ref_deltas);
-  vp10_zero(lf->last_mode_deltas);
+  av1_zero(lf->last_ref_deltas);
+  av1_zero(lf->last_mode_deltas);
   set_default_lf_deltas(lf);
 
   // To force update of the sharpness
   lf->last_sharpness_level = -1;
 
-  vp10_default_coef_probs(cm);
+  av1_default_coef_probs(cm);
   init_mode_probs(cm->fc);
-  vp10_init_mv_probs(cm);
+  av1_init_mv_probs(cm);
   cm->fc->initialized = 1;
 
   if (cm->frame_type == KEY_FRAME || cm->error_resilient_mode ||
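
The mode, partition, and transform-type tables above (av1_intra_mode_tree, av1_partition_tree, av1_ext_tx_tree, ...) all use the same aom_tree_index encoding: a negative entry is a leaf holding the negated symbol, a non-negative entry is the array offset of the next left/right pair, and in the real decoder each step also reads one bit against roughly probs[i >> 1]. A self-contained sketch of walking such a tree from a caller-supplied bit string (the PREDICTION_MODE values are local stand-ins):

```c
#include <stdio.h>

typedef int tree_index; /* stands in for aom_tree_index */

/* Stand-in symbol values so the tree below is self-contained; the real code
 * uses the PREDICTION_MODE enum. */
enum { DC_PRED, V_PRED, H_PRED, D45_PRED, D135_PRED, D117_PRED,
       D153_PRED, D207_PRED, D63_PRED, TM_PRED };

/* Same layout as av1_intra_mode_tree: -symbol marks a leaf, a non-negative
 * value is the index of the next left/right pair. */
static const tree_index intra_mode_tree[] = {
  -DC_PRED,   2,          /* 0 = DC_NODE */
  -TM_PRED,   4,          /* 1 = TM_NODE */
  -V_PRED,    6,          /* 2 = V_NODE */
  8,          12,         /* 3 = COM_NODE */
  -H_PRED,    10,         /* 4 = H_NODE */
  -D135_PRED, -D117_PRED, /* 5 = D135_NODE */
  -D45_PRED,  14,         /* 6 = D45_NODE */
  -D63_PRED,  16,         /* 7 = D63_NODE */
  -D153_PRED, -D207_PRED  /* 8 = D153_NODE */
};

/* Walk the tree with a supplied bit sequence (0 = left branch); assumes the
 * sequence is long enough to reach a leaf. */
static int read_tree(const tree_index *tree, const int *bits, int nbits) {
  tree_index i = 0;
  int b = 0;
  do {
    i = tree[i + bits[b++]];
  } while (i > 0 && b < nbits);
  return -i; /* leaf symbol */
}

int main(void) {
  const int to_dc[] = { 0 };              /* a single 0 bit selects DC_PRED */
  const int to_d45[] = { 1, 1, 1, 1, 0 }; /* five bits walk down to D45_PRED */
  printf("decoded %d and %d\n", read_tree(intra_mode_tree, to_dc, 1),
         read_tree(intra_mode_tree, to_d45, 5));
  return 0;
}
```

This is why aom_tree_merge_probs() above takes the tree itself: adapting the per-node probabilities requires knowing which branch counts feed which internal node.
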
diff --git a/av1/common/entropymode.h b/av1/common/entropymode.h
index 40b4fc37b0e7d9805c2142d3bab6ee91cc16cabf..011d5f0df21f4f32955adec9495d6ab5fee8e0b7 100644
--- a/av1/common/entropymode.h
+++ b/av1/common/entropymode.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_ENTROPYMODE_H_
-#define VP10_COMMON_ENTROPYMODE_H_
+#ifndef AV1_COMMON_ENTROPYMODE_H_
+#define AV1_COMMON_ENTROPYMODE_H_
 
 #include "av1/common/entropy.h"
 #include "av1/common/entropymv.h"
@@ -28,7 +28,7 @@ extern "C" {
 
 #define INTER_OFFSET(mode) ((mode)-NEARESTMV)
 
-struct VP10Common;
+struct AV1Common;
 
 struct tx_probs {
   aom_prob p32x32[TX_SIZE_CONTEXTS][TX_SIZES - 1];
@@ -53,7 +53,7 @@ typedef struct frame_contexts {
   aom_prob y_mode_prob[BLOCK_SIZE_GROUPS][INTRA_MODES - 1];
   aom_prob uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
   aom_prob partition_prob[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
-  vp10_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
+  av1_coeff_probs_model coef_probs[TX_SIZES][PLANE_TYPES];
   aom_prob
       switchable_interp_prob[SWITCHABLE_FILTER_CONTEXTS][SWITCHABLE_FILTERS -
                                                          1];
@@ -78,7 +78,7 @@ typedef struct FRAME_COUNTS {
   unsigned int y_mode[BLOCK_SIZE_GROUPS][INTRA_MODES];
   unsigned int uv_mode[INTRA_MODES][INTRA_MODES];
   unsigned int partition[PARTITION_CONTEXTS][PARTITION_TYPES];
-  vp10_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
+  av1_coeff_count_model coef[TX_SIZES][PLANE_TYPES];
   unsigned int
       eob_branch[TX_SIZES][PLANE_TYPES][REF_TYPES][COEF_BANDS][COEFF_CONTEXTS];
   unsigned int
@@ -99,34 +99,34 @@ typedef struct FRAME_COUNTS {
 } FRAME_COUNTS;
 
 extern const aom_prob
-    vp10_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
+    av1_kf_y_mode_prob[INTRA_MODES][INTRA_MODES][INTRA_MODES - 1];
 #if !CONFIG_MISC_FIXES
-extern const aom_prob vp10_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
+extern const aom_prob av1_kf_uv_mode_prob[INTRA_MODES][INTRA_MODES - 1];
 extern const aom_prob
-    vp10_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
+    av1_kf_partition_probs[PARTITION_CONTEXTS][PARTITION_TYPES - 1];
 #endif
 
-extern const aom_tree_index vp10_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
-extern const aom_tree_index vp10_inter_mode_tree[TREE_SIZE(INTER_MODES)];
-extern const aom_tree_index vp10_partition_tree[TREE_SIZE(PARTITION_TYPES)];
+extern const aom_tree_index av1_intra_mode_tree[TREE_SIZE(INTRA_MODES)];
+extern const aom_tree_index av1_inter_mode_tree[TREE_SIZE(INTER_MODES)];
+extern const aom_tree_index av1_partition_tree[TREE_SIZE(PARTITION_TYPES)];
 extern const aom_tree_index
-    vp10_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
+    av1_switchable_interp_tree[TREE_SIZE(SWITCHABLE_FILTERS)];
 
-void vp10_setup_past_independence(struct VP10Common *cm);
+void av1_setup_past_independence(struct AV1Common *cm);
 
-void vp10_adapt_intra_frame_probs(struct VP10Common *cm);
-void vp10_adapt_inter_frame_probs(struct VP10Common *cm);
+void av1_adapt_intra_frame_probs(struct AV1Common *cm);
+void av1_adapt_inter_frame_probs(struct AV1Common *cm);
 
-void vp10_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
+void av1_tx_counts_to_branch_counts_32x32(const unsigned int *tx_count_32x32p,
                                            unsigned int (*ct_32x32p)[2]);
-void vp10_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
+void av1_tx_counts_to_branch_counts_16x16(const unsigned int *tx_count_16x16p,
                                            unsigned int (*ct_16x16p)[2]);
-void vp10_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
+void av1_tx_counts_to_branch_counts_8x8(const unsigned int *tx_count_8x8p,
                                          unsigned int (*ct_8x8p)[2]);
 
-extern const aom_tree_index vp10_ext_tx_tree[TREE_SIZE(TX_TYPES)];
+extern const aom_tree_index av1_ext_tx_tree[TREE_SIZE(TX_TYPES)];
 
-static INLINE int vp10_ceil_log2(int n) {
+static INLINE int av1_ceil_log2(int n) {
   int i = 1, p = 2;
   while (p < n) {
     i++;
@@ -139,4 +139,4 @@ static INLINE int vp10_ceil_log2(int n) {
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPYMODE_H_
+#endif  // AV1_COMMON_ENTROPYMODE_H_
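
av1_ceil_log2() above returns the number of bits needed to enumerate n symbols; note that in this formulation it returns 1, not 0, for n <= 2. A tiny standalone check (the visible part of the loop is copied from the header; the doubling of p inside the loop is assumed, since the hunk cuts off before it):

```c
#include <stdio.h>

/* Local copy of the av1_ceil_log2() logic from entropymode.h above. */
static int ceil_log2(int n) {
  int i = 1, p = 2;
  while (p < n) {
    i++;
    p = p << 1; /* assumed continuation of the loop body */
  }
  return i;
}

int main(void) {
  /* Expected: 2 -> 1, 3 -> 2, 8 -> 3, 9 -> 4 */
  const int samples[] = { 2, 3, 8, 9 };
  for (int k = 0; k < 4; ++k)
    printf("ceil_log2(%d) = %d\n", samples[k], ceil_log2(samples[k]));
  return 0;
}
```
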
diff --git a/av1/common/entropymv.c b/av1/common/entropymv.c
index 521d326f2893fb9586950d490202889b9e9db05b..ab9c53ba1b77de1be0293168e661295de2f81faf 100644
--- a/av1/common/entropymv.c
+++ b/av1/common/entropymv.c
@@ -15,12 +15,12 @@
 // Integer pel reference mv threshold for use of high-precision 1/8 mv
 #define COMPANDED_MVREF_THRESH 8
 
-const aom_tree_index vp10_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
+const aom_tree_index av1_mv_joint_tree[TREE_SIZE(MV_JOINTS)] = {
   -MV_JOINT_ZERO, 2, -MV_JOINT_HNZVZ, 4, -MV_JOINT_HZVNZ, -MV_JOINT_HNZVNZ
 };
 
 /* clang-format off */
-const aom_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
+const aom_tree_index av1_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
   -MV_CLASS_0, 2,
   -MV_CLASS_1, 4,
   6, 8,
@@ -34,11 +34,11 @@ const aom_tree_index vp10_mv_class_tree[TREE_SIZE(MV_CLASSES)] = {
 };
 /* clang-format on */
 
-const aom_tree_index vp10_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
+const aom_tree_index av1_mv_class0_tree[TREE_SIZE(CLASS0_SIZE)] = {
   -0, -1,
 };
 
-const aom_tree_index vp10_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
+const aom_tree_index av1_mv_fp_tree[TREE_SIZE(MV_FP_SIZE)] = { -0, 2,  -1,
                                                                 4,  -2, -3 };
 
 static const nmv_context default_nmv_context = {
@@ -114,7 +114,7 @@ static INLINE int mv_class_base(MV_CLASS_TYPE c) {
   return c ? CLASS0_SIZE << (c + 2) : 0;
 }
 
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset) {
   const MV_CLASS_TYPE c = (z >= CLASS0_SIZE * 4096)
                               ? MV_CLASS_10
                               : (MV_CLASS_TYPE)log_in_base_2[z >> 3];
@@ -122,7 +122,7 @@ MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset) {
   return c;
 }
 
-int vp10_use_mv_hp(const MV *ref) {
+int av1_use_mv_hp(const MV *ref) {
 #if CONFIG_MISC_FIXES
   (void)ref;
   return 1;
@@ -140,7 +140,7 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts, int incr,
   comp_counts->sign[s] += incr;
   z = (s ? -v : v) - 1; /* magnitude - 1 */
 
-  c = vp10_get_mv_class(z, &o);
+  c = av1_get_mv_class(z, &o);
   comp_counts->classes[c] += incr;
 
   d = (o >> 3);     /* int mv data */
@@ -160,9 +160,9 @@ static void inc_mv_component(int v, nmv_component_counts *comp_counts, int incr,
   }
 }
 
-void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
+void av1_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
   if (counts != NULL) {
-    const MV_JOINT_TYPE j = vp10_get_mv_joint(mv);
+    const MV_JOINT_TYPE j = av1_get_mv_joint(mv);
     ++counts->joints[j];
 
     if (mv_joint_vertical(j)) {
@@ -177,14 +177,14 @@ void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
   }
 }
 
-void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
+void av1_adapt_mv_probs(AV1_COMMON *cm, int allow_hp) {
   int i, j;
 
   nmv_context *fc = &cm->fc->nmvc;
   const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
   const nmv_context_counts *counts = &cm->counts.mv;
 
-  aom_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+  aom_tree_merge_probs(av1_mv_joint_tree, pre_fc->joints, counts->joints,
                        fc->joints);
 
   for (i = 0; i < 2; ++i) {
@@ -193,19 +193,19 @@ void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
     const nmv_component_counts *c = &counts->comps[i];
 
     comp->sign = mode_mv_merge_probs(pre_comp->sign, c->sign);
-    aom_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+    aom_tree_merge_probs(av1_mv_class_tree, pre_comp->classes, c->classes,
                          comp->classes);
-    aom_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+    aom_tree_merge_probs(av1_mv_class0_tree, pre_comp->class0, c->class0,
                          comp->class0);
 
     for (j = 0; j < MV_OFFSET_BITS; ++j)
       comp->bits[j] = mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
 
     for (j = 0; j < CLASS0_SIZE; ++j)
-      aom_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+      aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->class0_fp[j],
                            c->class0_fp[j], comp->class0_fp[j]);
 
-    aom_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+    aom_tree_merge_probs(av1_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
 
     if (allow_hp) {
       comp->class0_hp = mode_mv_merge_probs(pre_comp->class0_hp, c->class0_hp);
@@ -214,4 +214,4 @@ void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
   }
 }
 
-void vp10_init_mv_probs(VP10_COMMON *cm) { cm->fc->nmvc = default_nmv_context; }
+void av1_init_mv_probs(AV1_COMMON *cm) { cm->fc->nmvc = default_nmv_context; }
diff --git a/av1/common/entropymv.h b/av1/common/entropymv.h
index 89a5570b7ce050d64f7b62c946e5454aa0d3f1fb..54e4ffe6f4e1c3d8a1a49fbf8286910e23cf3d5a 100644
--- a/av1/common/entropymv.h
+++ b/av1/common/entropymv.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_ENTROPYMV_H_
-#define VP10_COMMON_ENTROPYMV_H_
+#ifndef AV1_COMMON_ENTROPYMV_H_
+#define AV1_COMMON_ENTROPYMV_H_
 
 #include "./aom_config.h"
 
@@ -22,12 +22,12 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
-void vp10_init_mv_probs(struct VP10Common *cm);
+void av1_init_mv_probs(struct AV1Common *cm);
 
-void vp10_adapt_mv_probs(struct VP10Common *cm, int usehp);
-int vp10_use_mv_hp(const MV *ref);
+void av1_adapt_mv_probs(struct AV1Common *cm, int usehp);
+int av1_use_mv_hp(const MV *ref);
 
 #define MV_UPDATE_PROB 252
 
@@ -77,10 +77,10 @@ typedef enum {
 #define MV_UPP ((1 << MV_IN_USE_BITS) - 1)
 #define MV_LOW (-(1 << MV_IN_USE_BITS))
 
-extern const aom_tree_index vp10_mv_joint_tree[];
-extern const aom_tree_index vp10_mv_class_tree[];
-extern const aom_tree_index vp10_mv_class0_tree[];
-extern const aom_tree_index vp10_mv_fp_tree[];
+extern const aom_tree_index av1_mv_joint_tree[];
+extern const aom_tree_index av1_mv_class_tree[];
+extern const aom_tree_index av1_mv_class0_tree[];
+extern const aom_tree_index av1_mv_fp_tree[];
 
 typedef struct {
   aom_prob sign;
@@ -98,7 +98,7 @@ typedef struct {
   nmv_component comps[2];
 } nmv_context;
 
-static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) {
+static INLINE MV_JOINT_TYPE av1_get_mv_joint(const MV *mv) {
   if (mv->row == 0) {
     return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
   } else {
@@ -106,7 +106,7 @@ static INLINE MV_JOINT_TYPE vp10_get_mv_joint(const MV *mv) {
   }
 }
 
-MV_CLASS_TYPE vp10_get_mv_class(int z, int *offset);
+MV_CLASS_TYPE av1_get_mv_class(int z, int *offset);
 
 typedef struct {
   unsigned int sign[2];
@@ -124,10 +124,10 @@ typedef struct {
   nmv_component_counts comps[2];
 } nmv_context_counts;
 
-void vp10_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
+void av1_inc_mv(const MV *mv, nmv_context_counts *mvctx, const int usehp);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENTROPYMV_H_
+#endif  // AV1_COMMON_ENTROPYMV_H_
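
av1_get_mv_joint() above classifies a motion vector by which of its two components are non-zero; the resulting MV_JOINT_* value indexes the joints probability tree before the per-component magnitudes are coded. A standalone sketch with the same branch structure (the enum values are stand-ins for MV_JOINT_TYPE, which is defined in a part of the header not shown in these hunks):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for MV_JOINT_TYPE: which MV components are non-zero.
 * HNZVZ = horizontal non-zero / vertical zero, and so on. */
typedef enum {
  MV_JOINT_ZERO = 0,
  MV_JOINT_HNZVZ = 1,
  MV_JOINT_HZVNZ = 2,
  MV_JOINT_HNZVNZ = 3
} mv_joint_type;

typedef struct { int16_t row, col; } mv_t; /* stands in for MV */

/* Same logic as av1_get_mv_joint() in entropymv.h above. */
static mv_joint_type get_mv_joint(const mv_t *mv) {
  if (mv->row == 0)
    return mv->col == 0 ? MV_JOINT_ZERO : MV_JOINT_HNZVZ;
  return mv->col == 0 ? MV_JOINT_HZVNZ : MV_JOINT_HNZVNZ;
}

int main(void) {
  const mv_t examples[] = { { 0, 0 }, { 0, 5 }, { -3, 0 }, { 7, -2 } };
  for (int i = 0; i < 4; ++i)
    printf("mv (%d, %d) -> joint %d\n", examples[i].row, examples[i].col,
           (int)get_mv_joint(&examples[i]));
  return 0;
}
```

av1_inc_mv() follows the same split: it bumps the joint count first, then only accumulates component statistics for the components that are actually non-zero.
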
diff --git a/av1/common/enums.h b/av1/common/enums.h
index 52c9592c119f85b36cf45ae42e1efea8514cb949..a133a28aa9fdaad8b3bc7018b42808670a0dd64a 100644
--- a/av1/common/enums.h
+++ b/av1/common/enums.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_ENUMS_H_
-#define VP10_COMMON_ENUMS_H_
+#ifndef AV1_COMMON_ENUMS_H_
+#define AV1_COMMON_ENUMS_H_
 
 #include "./aom_config.h"
 #include "aom/aom_integer.h"
@@ -143,4 +143,4 @@ typedef uint8_t PREDICTION_MODE;
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ENUMS_H_
+#endif  // AV1_COMMON_ENUMS_H_
diff --git a/av1/common/filter.c b/av1/common/filter.c
index 8710f800697e8d72af6bf71295ae1d4699a960cf..aa6626a57b58cef94b8b0db17de5f0ddce40cdd6 100644
--- a/av1/common/filter.c
+++ b/av1/common/filter.c
@@ -64,6 +64,6 @@ DECLARE_ALIGNED(256, static const InterpKernel,
   { 0, -3, 2, 41, 63, 29, -2, -2 },   { 0, -3, 1, 38, 64, 32, -1, -3 }
 };
 
-const InterpKernel *vp10_filter_kernels[4] = {
+const InterpKernel *av1_filter_kernels[4] = {
   sub_pel_filters_8, sub_pel_filters_8lp, sub_pel_filters_8s, bilinear_filters
 };
diff --git a/av1/common/filter.h b/av1/common/filter.h
index 07a1bd6a04fc59a243a1f105d09808ded5a6c788..6e3f547c29030e32457ddde9a191d2717d92f616 100644
--- a/av1/common/filter.h
+++ b/av1/common/filter.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_FILTER_H_
-#define VP10_COMMON_FILTER_H_
+#ifndef AV1_COMMON_FILTER_H_
+#define AV1_COMMON_FILTER_H_
 
 #include "./aom_config.h"
 #include "aom/aom_integer.h"
@@ -33,10 +33,10 @@ extern "C" {
 
 typedef uint8_t INTERP_FILTER;
 
-extern const InterpKernel *vp10_filter_kernels[4];
+extern const InterpKernel *av1_filter_kernels[4];
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_FILTER_H_
+#endif  // AV1_COMMON_FILTER_H_
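
av1_filter_kernels above selects one of four 8-tap kernel tables by INTERP_FILTER, and each InterpKernel row holds the taps for one sub-pixel phase. The rows visible in filter.c sum to 128, so a filtered sample is scaled back by 7 bits. A minimal sketch of applying one such phase horizontally (the 7-bit normalization is inferred from the tap sums, and the sample row is made up):

```c
#include <stdint.h>
#include <stdio.h>

#define FILTER_BITS 7 /* taps above sum to 128, i.e. 1 << FILTER_BITS */
#define TAPS 8

/* One sub-pixel phase taken from the sub_pel_filters_8 rows shown above. */
static const int16_t kernel_phase[TAPS] = { 0, -3, 1, 38, 64, 32, -1, -3 };

static uint8_t clip_pixel(int v) {
  return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
}

/* Apply the 8 taps to 8 consecutive source samples and renormalize. */
static uint8_t filter_horiz(const uint8_t *src, const int16_t *kernel) {
  int sum = 0;
  for (int k = 0; k < TAPS; ++k) sum += src[k] * kernel[k];
  /* round to nearest, then shift out the fractional filter bits */
  return clip_pixel((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}

int main(void) {
  /* A made-up row of pixels around the position being interpolated. */
  const uint8_t row[16] = { 10, 12, 14, 16, 40, 80, 120, 160,
                            180, 190, 200, 205, 210, 212, 214, 216 };
  printf("interpolated sample = %u\n",
         (unsigned)filter_horiz(row + 1, kernel_phase));
  return 0;
}
```

The convolve code in aom_dsp applies the same idea per output pixel, stepping the phase by x_step_q4/y_step_q4 to support scaling.
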
diff --git a/av1/common/frame_buffers.c b/av1/common/frame_buffers.c
index c9eeb256935e3b3571059f54316633313179bc86..0ca39196c94b4133f817e9e740ef6448b46a8b2e 100644
--- a/av1/common/frame_buffers.c
+++ b/av1/common/frame_buffers.c
@@ -14,9 +14,9 @@
 #include "av1/common/frame_buffers.h"
 #include "aom_mem/aom_mem.h"
 
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
   assert(list != NULL);
-  vp10_free_internal_frame_buffers(list);
+  av1_free_internal_frame_buffers(list);
 
   list->num_internal_frame_buffers =
       VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS;
@@ -25,7 +25,7 @@ int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list) {
   return (list->int_fb == NULL);
 }
 
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list) {
   int i;
 
   assert(list != NULL);
@@ -38,7 +38,7 @@ void vp10_free_internal_frame_buffers(InternalFrameBufferList *list) {
   list->int_fb = NULL;
 }
 
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
                           aom_codec_frame_buffer_t *fb) {
   int i;
   InternalFrameBufferList *const int_fb_list =
@@ -73,7 +73,7 @@ int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
   return 0;
 }
 
-int vp10_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb) {
   InternalFrameBuffer *const int_fb = (InternalFrameBuffer *)fb->priv;
   (void)cb_priv;
   if (int_fb) int_fb->in_use = 0;
diff --git a/av1/common/frame_buffers.h b/av1/common/frame_buffers.h
index 51d3a0cd5d2fbdbf2d61ca48631d26c072beacb1..c062ffed47667f530bfc76560fb6e7efcadc21e2 100644
--- a/av1/common/frame_buffers.h
+++ b/av1/common/frame_buffers.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_FRAME_BUFFERS_H_
-#define VP10_COMMON_FRAME_BUFFERS_H_
+#ifndef AV1_COMMON_FRAME_BUFFERS_H_
+#define AV1_COMMON_FRAME_BUFFERS_H_
 
 #include "aom/aom_frame_buffer.h"
 #include "aom/aom_integer.h"
@@ -31,24 +31,24 @@ typedef struct InternalFrameBufferList {
 } InternalFrameBufferList;
 
 // Initializes |list|. Returns 0 on success.
-int vp10_alloc_internal_frame_buffers(InternalFrameBufferList *list);
+int av1_alloc_internal_frame_buffers(InternalFrameBufferList *list);
 
 // Free any data allocated to the frame buffers.
-void vp10_free_internal_frame_buffers(InternalFrameBufferList *list);
+void av1_free_internal_frame_buffers(InternalFrameBufferList *list);
 
 // Callback used by libaom to request an external frame buffer. |cb_priv|
 // Callback private data, which points to an InternalFrameBufferList.
 // |min_size| is the minimum size in bytes needed to decode the next frame.
 // |fb| pointer to the frame buffer.
-int vp10_get_frame_buffer(void *cb_priv, size_t min_size,
+int av1_get_frame_buffer(void *cb_priv, size_t min_size,
                           aom_codec_frame_buffer_t *fb);
 
 // Callback used by libaom when there are no references to the frame buffer.
 // |cb_priv| is not used. |fb| pointer to the frame buffer.
-int vp10_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
+int av1_release_frame_buffer(void *cb_priv, aom_codec_frame_buffer_t *fb);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_FRAME_BUFFERS_H_
+#endif  // AV1_COMMON_FRAME_BUFFERS_H_
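
The pair above implements libaom's external frame-buffer callback contract: av1_get_frame_buffer() hands out (and grows, if needed) a buffer from the InternalFrameBufferList passed as cb_priv, and av1_release_frame_buffer() marks it reusable once the codec drops its last reference. The sketch below models that contract with local stand-in types (the aom structs and field names are replaced so it compiles on its own; it is not the library implementation):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  uint8_t *data;
  size_t size;
  void *priv;
} frame_buffer_t; /* stands in for aom_codec_frame_buffer_t */

typedef struct {
  uint8_t *data;
  size_t size;
  int in_use;
} internal_buffer_t;

typedef struct {
  int num;
  internal_buffer_t *bufs;
} buffer_list_t; /* stands in for InternalFrameBufferList */

/* Like av1_get_frame_buffer(): find a free slot, grow it if smaller than
 * min_size, and hand it back through |fb|. Returns 0 on success. */
static int get_frame_buffer(void *cb_priv, size_t min_size,
                            frame_buffer_t *fb) {
  buffer_list_t *list = (buffer_list_t *)cb_priv;
  for (int i = 0; i < list->num; ++i) {
    internal_buffer_t *b = &list->bufs[i];
    if (b->in_use) continue;
    if (b->size < min_size) {
      uint8_t *p = (uint8_t *)realloc(b->data, min_size);
      if (!p) return -1;
      b->data = p;
      b->size = min_size;
      memset(b->data, 0, b->size);
    }
    b->in_use = 1;
    fb->data = b->data;
    fb->size = b->size;
    fb->priv = b;
    return 0;
  }
  return -1; /* no free buffer */
}

/* Like av1_release_frame_buffer(): mark the buffer as reusable. */
static int release_frame_buffer(void *cb_priv, frame_buffer_t *fb) {
  (void)cb_priv;
  if (fb->priv) ((internal_buffer_t *)fb->priv)->in_use = 0;
  return 0;
}

int main(void) {
  internal_buffer_t slots[2] = { { NULL, 0, 0 }, { NULL, 0, 0 } };
  buffer_list_t list = { 2, slots };
  frame_buffer_t fb = { NULL, 0, NULL };
  if (get_frame_buffer(&list, 4096, &fb) == 0)
    printf("got %zu bytes\n", fb.size);
  release_frame_buffer(&list, &fb);
  free(slots[0].data);
  free(slots[1].data);
  return 0;
}
```

The real list is sized for the maximum number of reference plus work buffers, which is why av1_alloc_internal_frame_buffers() allocates VPX_MAXIMUM_REF_BUFFERS + VPX_MAXIMUM_WORK_BUFFERS slots up front.
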
diff --git a/av1/common/idct.c b/av1/common/idct.c
index b9679d836c3793b1426ccc5924c86820903187c5..37eb5a9cf83986d5d2123d77ddd60fb2da240651 100644
--- a/av1/common/idct.c
+++ b/av1/common/idct.c
@@ -18,7 +18,7 @@
 #include "aom_dsp/inv_txfm.h"
 #include "aom_ports/mem.h"
 
-void vp10_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                           int tx_type) {
   const transform_2d IHT_4[] = {
     { idct4_c, idct4_c },   // DCT_DCT  = 0
@@ -57,7 +57,7 @@ static const transform_2d IHT_8[] = {
   { iadst8_c, iadst8_c }  // ADST_ADST = 3
 };
 
-void vp10_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                           int tx_type) {
   int i, j;
   tran_low_t out[8 * 8];
@@ -90,7 +90,7 @@ static const transform_2d IHT_16[] = {
   { iadst16_c, iadst16_c }  // ADST_ADST = 3
 };
 
-void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
                              int tx_type) {
   int i, j;
   tran_low_t out[16 * 16];
@@ -117,7 +117,7 @@ void vp10_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest, int stride,
 }
 
 // idct
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                       int eob) {
   if (eob > 1)
     aom_idct4x4_16_add(input, dest, stride);
@@ -125,7 +125,7 @@ void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_idct4x4_1_add(input, dest, stride);
 }
 
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                       int eob) {
   if (eob > 1)
     aom_iwht4x4_16_add(input, dest, stride);
@@ -133,14 +133,14 @@ void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_iwht4x4_1_add(input, dest, stride);
 }
 
-void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
                       int eob) {
   // If dc is 1, then input[0] is the reconstructed value, do not need
   // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
 
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to decide what to do.
-  // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
   // Combine that with code here.
   if (eob == 1)
     // DC only DCT coefficient
@@ -151,7 +151,7 @@ void vp10_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_idct8x8_64_add(input, dest, stride);
 }
 
-void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
                         int eob) {
   /* The calculation can be simplified if there are not many non-zero dct
    * coefficients. Use eobs to separate different cases. */
@@ -163,7 +163,7 @@ void vp10_idct16x16_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_idct16x16_256_add(input, dest, stride);
 }
 
-void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
                         int eob) {
   if (eob == 1)
     aom_idct32x32_1_add(input, dest, stride);
@@ -174,48 +174,48 @@ void vp10_idct32x32_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_idct32x32_1024_add(input, dest, stride);
 }
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type, int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_iwht4x4_add(input, dest, stride, eob);
+    av1_iwht4x4_add(input, dest, stride, eob);
   } else {
     switch (tx_type) {
-      case DCT_DCT: vp10_idct4x4_add(input, dest, stride, eob); break;
+      case DCT_DCT: av1_idct4x4_add(input, dest, stride, eob); break;
       case ADST_DCT:
       case DCT_ADST:
-      case ADST_ADST: vp10_iht4x4_16_add(input, dest, stride, tx_type); break;
+      case ADST_ADST: av1_iht4x4_16_add(input, dest, stride, tx_type); break;
       default: assert(0); break;
     }
   }
 }
 
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct8x8_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct8x8_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht8x8_64_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht8x8_64_add(input, dest, stride, tx_type); break;
     default: assert(0); break;
   }
 }
 
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct16x16_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct16x16_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
-    case ADST_ADST: vp10_iht16x16_256_add(input, dest, stride, tx_type); break;
+    case ADST_ADST: av1_iht16x16_256_add(input, dest, stride, tx_type); break;
     default: assert(0); break;
   }
 }
 
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_idct32x32_add(input, dest, stride, eob); break;
+    case DCT_DCT: av1_idct32x32_add(input, dest, stride, eob); break;
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST: assert(0); break;
@@ -223,8 +223,8 @@ void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iht4x4_16_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int tx_type, int bd) {
   const highbd_transform_2d IHT_4[] = {
     { aom_highbd_idct4_c, aom_highbd_idct4_c },   // DCT_DCT  = 0
@@ -264,7 +264,7 @@ static const highbd_transform_2d HIGH_IHT_8[] = {
   { aom_highbd_iadst8_c, aom_highbd_iadst8_c }  // ADST_ADST = 3
 };
 
-void vp10_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_iht8x8_64_add_c(const tran_low_t *input, uint8_t *dest8,
                                  int stride, int tx_type, int bd) {
   int i, j;
   tran_low_t out[8 * 8];
@@ -298,7 +298,7 @@ static const highbd_transform_2d HIGH_IHT_16[] = {
   { aom_highbd_iadst16_c, aom_highbd_iadst16_c }  // ADST_ADST = 3
 };
 
-void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
                                     int stride, int tx_type, int bd) {
   int i, j;
   tran_low_t out[16 * 16];
@@ -326,7 +326,7 @@ void vp10_highbd_iht16x16_256_add_c(const tran_low_t *input, uint8_t *dest8,
 }
 
 // idct
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, int bd) {
   if (eob > 1)
     aom_highbd_idct4x4_16_add(input, dest, stride, bd);
@@ -334,7 +334,7 @@ void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_highbd_idct4x4_1_add(input, dest, stride, bd);
 }
 
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, int bd) {
   if (eob > 1)
     aom_highbd_iwht4x4_16_add(input, dest, stride, bd);
@@ -342,14 +342,14 @@ void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
     aom_highbd_iwht4x4_1_add(input, dest, stride, bd);
 }
 
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, int bd) {
   // If dc is 1, then input[0] is the reconstructed value, do not need
   // dequantization. Also, when dc is 1, dc is counted in eobs, namely eobs >=1.
 
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to decide what to do.
-  // TODO(yunqingwang): "eobs = 1" case is also handled in vp10_short_idct8x8_c.
+  // TODO(yunqingwang): "eobs = 1" case is also handled in av1_short_idct8x8_c.
   // Combine that with code here.
   // DC only DCT coefficient
   if (eob == 1) {
@@ -361,7 +361,7 @@ void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
   }
 }
 
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
                                int stride, int eob, int bd) {
   // The calculation can be simplified if there are not many non-zero dct
   // coefficients. Use eobs to separate different cases.
@@ -375,7 +375,7 @@ void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
                                int stride, int eob, int bd) {
   // Non-zero coeff only in upper-left 8x8
   if (eob == 1) {
@@ -387,63 +387,63 @@ void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
   }
 }
 
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd, TX_TYPE tx_type,
                                   int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_highbd_iwht4x4_add(input, dest, stride, eob, bd);
+    av1_highbd_iwht4x4_add(input, dest, stride, eob, bd);
   } else {
     switch (tx_type) {
       case DCT_DCT:
-        vp10_highbd_idct4x4_add(input, dest, stride, eob, bd);
+        av1_highbd_idct4x4_add(input, dest, stride, eob, bd);
         break;
       case ADST_DCT:
       case DCT_ADST:
       case ADST_ADST:
-        vp10_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
+        av1_highbd_iht4x4_16_add(input, dest, stride, tx_type, bd);
         break;
       default: assert(0); break;
     }
   }
 }
 
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd,
                                   TX_TYPE tx_type) {
   switch (tx_type) {
-    case DCT_DCT: vp10_highbd_idct8x8_add(input, dest, stride, eob, bd); break;
+    case DCT_DCT: av1_highbd_idct8x8_add(input, dest, stride, eob, bd); break;
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_highbd_iht8x8_64_add(input, dest, stride, tx_type, bd);
+      av1_highbd_iht8x8_64_add(input, dest, stride, tx_type, bd);
       break;
     default: assert(0); break;
   }
 }
 
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
                                     int stride, int eob, int bd,
                                     TX_TYPE tx_type) {
   switch (tx_type) {
     case DCT_DCT:
-      vp10_highbd_idct16x16_add(input, dest, stride, eob, bd);
+      av1_highbd_idct16x16_add(input, dest, stride, eob, bd);
       break;
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_highbd_iht16x16_256_add(input, dest, stride, tx_type, bd);
+      av1_highbd_iht16x16_256_add(input, dest, stride, tx_type, bd);
       break;
     default: assert(0); break;
   }
 }
 
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
                                     int stride, int eob, int bd,
                                     TX_TYPE tx_type) {
   switch (tx_type) {
     case DCT_DCT:
-      vp10_highbd_idct32x32_add(input, dest, stride, eob, bd);
+      av1_highbd_idct32x32_add(input, dest, stride, eob, bd);
       break;
     case ADST_DCT:
     case DCT_ADST:
@@ -451,4 +451,4 @@ void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
     default: assert(0); break;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
diff --git a/av1/common/idct.h b/av1/common/idct.h
index 2d554ab9d125ec197d2544383666fa8a49e76f75..3b680c6b02d6315594451f1ae4dd92c6a369866c 100644
--- a/av1/common/idct.h
+++ b/av1/common/idct.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_IDCT_H_
-#define VP10_COMMON_IDCT_H_
+#ifndef AV1_COMMON_IDCT_H_
+#define AV1_COMMON_IDCT_H_
 
 #include <assert.h>
 
@@ -31,53 +31,53 @@ typedef struct {
   transform_1d cols, rows;  // vertical and horizontal
 } transform_2d;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*highbd_transform_1d)(const tran_low_t *, tran_low_t *, int bd);
 
 typedef struct {
   highbd_transform_1d cols, rows;  // vertical and horizontal
 } highbd_transform_2d;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                       int eob);
-void vp10_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                       int eob);
 
-void vp10_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type, int lossless);
-void vp10_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest, int stride,
                            int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, TX_TYPE tx_type);
-void vp10_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, TX_TYPE tx_type);
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_iwht4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, int bd);
-void vp10_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct4x4_add(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, int bd);
-void vp10_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_highbd_idct8x8_add(const tran_low_t *input, uint8_t *dest, int stride,
                              int eob, int bd);
-void vp10_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct16x16_add(const tran_low_t *input, uint8_t *dest,
                                int stride, int eob, int bd);
-void vp10_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_idct32x32_add(const tran_low_t *input, uint8_t *dest,
                                int stride, int eob, int bd);
-void vp10_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_4x4(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd, TX_TYPE tx_type,
                                   int lossless);
-void vp10_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_8x8(const tran_low_t *input, uint8_t *dest,
                                   int stride, int eob, int bd, TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_16x16(const tran_low_t *input, uint8_t *dest,
                                     int stride, int eob, int bd,
                                     TX_TYPE tx_type);
-void vp10_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
+void av1_highbd_inv_txfm_add_32x32(const tran_low_t *input, uint8_t *dest,
                                     int stride, int eob, int bd,
                                     TX_TYPE tx_type);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_IDCT_H_
+#endif  // AV1_COMMON_IDCT_H_
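
As the comments in idct.c note, the inverse-transform wrappers pick a cheaper path when few coefficients survive quantization: eob == 1 means only the DC coefficient was coded, small eob values allow a reduced transform over the low-frequency corner, and only large eob values need the full transform. A trivial standalone sketch of that dispatch (the partial-path threshold here is illustrative; the real cut-offs live in the per-size wrappers):

```c
#include <stdio.h>

typedef enum { IDCT_DC_ONLY, IDCT_PARTIAL, IDCT_FULL } idct_path;

/* Choose the inverse-transform variant from the end-of-block position. */
static idct_path choose_idct_path(int eob, int partial_thresh) {
  if (eob == 1) return IDCT_DC_ONLY;              /* only DC was coded */
  if (eob <= partial_thresh) return IDCT_PARTIAL; /* low-frequency corner */
  return IDCT_FULL;
}

int main(void) {
  const int eobs[] = { 1, 6, 200 };
  for (int i = 0; i < 3; ++i)
    printf("eob %3d -> path %d\n", eobs[i],
           (int)choose_idct_path(eobs[i], 10 /* illustrative threshold */));
  return 0;
}
```
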
diff --git a/av1/common/loopfilter.c b/av1/common/loopfilter.c
index 2cb16f5239a663954f5cd03d48cae3f8f22ea04b..b0b65964915835271880f7a5db2e17fb9a1cfcdd 100644
--- a/av1/common/loopfilter.c
+++ b/av1/common/loopfilter.c
@@ -237,7 +237,7 @@ static uint8_t get_filter_level(const loop_filter_info_n *lfi_n,
       ->lvl[mbmi->segment_id][mbmi->ref_frame[0]][mode_lf_lut[mbmi->mode]];
 }
 
-void vp10_loop_filter_init(VP10_COMMON *cm) {
+void av1_loop_filter_init(AV1_COMMON *cm) {
   loop_filter_info_n *lfi = &cm->lf_info;
   struct loopfilter *lf = &cm->lf;
   int lvl;
@@ -251,7 +251,7 @@ void vp10_loop_filter_init(VP10_COMMON *cm) {
     memset(lfi->lfthr[lvl].hev_thr, (lvl >> 4), SIMD_WIDTH);
 }
 
-void vp10_loop_filter_frame_init(VP10_COMMON *cm, int default_filt_lvl) {
+void av1_loop_filter_frame_init(AV1_COMMON *cm, int default_filt_lvl) {
   int seg_id;
   // n_shift is the multiplier for lf_deltas
   // the multiplier is 1 for when filter_lvl is between 0 and 31;
@@ -393,7 +393,7 @@ static void filter_selectively_vert_row2(int subsampling_factor, uint8_t *s,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_vert_row2(
     int subsampling_factor, uint16_t *s, int pitch, unsigned int mask_16x16_l,
     unsigned int mask_8x8_l, unsigned int mask_4x4_l,
@@ -489,7 +489,7 @@ static void highbd_filter_selectively_vert_row2(
     mask_4x4_int_1 >>= 1;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void filter_selectively_horiz(
     uint8_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
@@ -584,7 +584,7 @@ static void filter_selectively_horiz(
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_horiz(
     uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
     unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -683,7 +683,7 @@ static void highbd_filter_selectively_horiz(
     mask_4x4_int >>= count;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // This function ors into the current lfm structure, where to do loop
 // filters for the specific mi we are looking at. It uses information
@@ -825,7 +825,7 @@ static void build_y_mask(const loop_filter_info_n *const lfi_n,
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
 // TODO(JBB): This function only works for yv12.
-void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
+void av1_setup_mask(AV1_COMMON *const cm, const int mi_row, const int mi_col,
                      MODE_INFO **mi, const int mode_info_stride,
                      LOOP_FILTER_MASK *lfm) {
   int idx_32, idx_16, idx_8;
@@ -860,7 +860,7 @@ void vp10_setup_mask(VP10_COMMON *const cm, const int mi_row, const int mi_col,
       (mi_col + MI_BLOCK_SIZE > cm->mi_cols ? cm->mi_cols - mi_col
                                             : MI_BLOCK_SIZE);
 
-  vp10_zero(*lfm);
+  av1_zero(*lfm);
   assert(mip[0] != NULL);
 
   // TODO(jimbankoski): Try moving most of the following code into decode
@@ -1115,7 +1115,7 @@ static void filter_selectively_vert(
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_filter_selectively_vert(
     uint16_t *s, int pitch, unsigned int mask_16x16, unsigned int mask_8x8,
     unsigned int mask_4x4, unsigned int mask_4x4_int,
@@ -1149,9 +1149,9 @@ static void highbd_filter_selectively_vert(
     mask_4x4_int >>= 1;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_filter_block_plane_non420(VP10_COMMON *cm,
+void av1_filter_block_plane_non420(AV1_COMMON *cm,
                                     struct macroblockd_plane *plane,
                                     MODE_INFO **mi_8x8, int mi_row,
                                     int mi_col) {
@@ -1253,7 +1253,7 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
 
     // Disable filtering on the leftmost column
     border_mask = ~(mi_col == 0);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_vert(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1270,7 +1270,7 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
     filter_selectively_vert(dst->buf, dst->stride, mask_16x16_c & border_mask,
                             mask_8x8_c & border_mask, mask_4x4_c & border_mask,
                             mask_4x4_int[r], &cm->lf_info, &lfl[r << 3]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += 8 * dst->stride;
     mi_8x8 += row_step_stride;
   }
@@ -1294,7 +1294,7 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
       mask_8x8_r = mask_8x8[r];
       mask_4x4_r = mask_4x4[r];
     }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                       dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1309,12 +1309,12 @@ void vp10_filter_block_plane_non420(VP10_COMMON *cm,
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                              &lfl[r << 3]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += 8 * dst->stride;
   }
 }
 
-void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
+void av1_filter_block_plane_ss00(AV1_COMMON *const cm,
                                   struct macroblockd_plane *const plane,
                                   int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
@@ -1335,7 +1335,7 @@ void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
     unsigned int mask_4x4_int_l = mask_4x4_int & 0xffff;
 
 // Disable filtering on the leftmost column.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_vert_row2(
           plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1350,7 +1350,7 @@ void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
     filter_selectively_vert_row2(
         plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
         mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_y[r << 3]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     dst->buf += 16 * dst->stride;
     mask_16x16 >>= 16;
     mask_8x8 >>= 16;
@@ -1380,7 +1380,7 @@ void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
       mask_4x4_r = mask_4x4 & 0xff;
     }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(
           CONVERT_TO_SHORTPTR(dst->buf), dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1395,7 +1395,7 @@ void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int & 0xff, &cm->lf_info,
                              &lfm->lfl_y[r << 3]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     dst->buf += 8 * dst->stride;
     mask_16x16 >>= 8;
@@ -1405,7 +1405,7 @@ void vp10_filter_block_plane_ss00(VP10_COMMON *const cm,
   }
 }
 
-void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
+void av1_filter_block_plane_ss11(AV1_COMMON *const cm,
                                   struct macroblockd_plane *const plane,
                                   int mi_row, LOOP_FILTER_MASK *lfm) {
   struct buf_2d *const dst = &plane->dst;
@@ -1439,7 +1439,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
       unsigned int mask_4x4_int_l = mask_4x4_int & 0xff;
 
 // Disable filtering on the leftmost column.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         highbd_filter_selectively_vert_row2(
             plane->subsampling_x, CONVERT_TO_SHORTPTR(dst->buf), dst->stride,
@@ -1455,7 +1455,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
       filter_selectively_vert_row2(
           plane->subsampling_x, dst->buf, dst->stride, mask_16x16_l, mask_8x8_l,
           mask_4x4_l, mask_4x4_int_l, &cm->lf_info, &lfm->lfl_uv[r << 1]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       dst->buf += 16 * dst->stride;
       mask_16x16 >>= 8;
@@ -1494,7 +1494,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
       mask_4x4_r = mask_4x4 & 0xf;
     }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       highbd_filter_selectively_horiz(CONVERT_TO_SHORTPTR(dst->buf),
                                       dst->stride, mask_16x16_r, mask_8x8_r,
@@ -1509,7 +1509,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
     filter_selectively_horiz(dst->buf, dst->stride, mask_16x16_r, mask_8x8_r,
                              mask_4x4_r, mask_4x4_int_r, &cm->lf_info,
                              &lfm->lfl_uv[r << 1]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     dst->buf += 8 * dst->stride;
     mask_16x16 >>= 4;
@@ -1519,7 +1519,7 @@ void vp10_filter_block_plane_ss11(VP10_COMMON *const cm,
   }
 }
 
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, AV1_COMMON *cm,
                            struct macroblockd_plane planes[MAX_MB_PLANE],
                            int start, int stop, int y_only) {
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
@@ -1542,22 +1542,22 @@ void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
     for (mi_col = 0; mi_col < cm->mi_cols; mi_col += MI_BLOCK_SIZE) {
       int plane;
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+      av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
-      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+      av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
         switch (path) {
           case LF_PATH_420:
-            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_444:
-            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_SLOW:
-            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+            av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                            mi_row, mi_col);
             break;
         }
@@ -1566,7 +1566,7 @@ void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer, VP10_COMMON *cm,
   }
 }
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                             MACROBLOCKD *xd, int frame_filter_level, int y_only,
                             int partial_frame) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
@@ -1579,13 +1579,13 @@ void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
     mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
-  vp10_loop_filter_frame_init(cm, frame_filter_level);
-  vp10_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
+  av1_loop_filter_frame_init(cm, frame_filter_level);
+  av1_loop_filter_rows(frame, cm, xd->plane, start_mi_row, end_mi_row, y_only);
 }
 
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
     LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
-    struct VP10Common *cm,
+    struct AV1Common *cm,
     const struct macroblockd_plane planes[MAX_MB_PLANE]) {
   lf_data->frame_buffer = frame_buffer;
   lf_data->cm = cm;
@@ -1595,9 +1595,9 @@ void vp10_loop_filter_data_reset(
   memcpy(lf_data->planes, planes, sizeof(lf_data->planes));
 }
 
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused) {
   (void)unused;
-  vp10_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
+  av1_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                         lf_data->start, lf_data->stop, lf_data->y_only);
   return 1;
 }
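
For reference, a minimal caller-side sketch of the renamed loop-filter entry
points (not part of the patch; it assumes the av1/common/loopfilter.h
declarations are in scope and that cm, xd and the reconstructed frame already
exist):

/* Sketch only: filter a whole reconstructed frame through the renamed API.
 * av1_loop_filter_frame() runs av1_loop_filter_frame_init() and then
 * av1_loop_filter_rows() over the selected macroblock rows. */
static void filter_reconstructed_frame(AV1_COMMON *cm, MACROBLOCKD *xd,
                                       YV12_BUFFER_CONFIG *frame,
                                       int filter_level) {
  if (filter_level > 0)
    av1_loop_filter_frame(frame, cm, xd, filter_level,
                          0 /* y_only: filter all planes */,
                          0 /* partial_frame: filter every row */);
}
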
diff --git a/av1/common/loopfilter.h b/av1/common/loopfilter.h
index 73273e453f3a54cea4cd8b60fd05ecafe341b949..a8992f48ac7599f8ab75031c21a6b8404e57b152 100644
--- a/av1/common/loopfilter.h
+++ b/av1/common/loopfilter.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_LOOPFILTER_H_
-#define VP10_COMMON_LOOPFILTER_H_
+#ifndef AV1_COMMON_LOOPFILTER_H_
+#define AV1_COMMON_LOOPFILTER_H_
 
 #include "aom_ports/mem.h"
 #include "./aom_config.h"
@@ -92,49 +92,49 @@ typedef struct {
 } LOOP_FILTER_MASK;
 
 /* assorted loopfilter functions which get used elsewhere */
-struct VP10Common;
+struct AV1Common;
 struct macroblockd;
-struct VP10LfSyncData;
+struct AV1LfSyncData;
 
 // This function sets up the bit masks for the entire 64x64 region represented
 // by mi_row, mi_col.
-void vp10_setup_mask(struct VP10Common *const cm, const int mi_row,
+void av1_setup_mask(struct AV1Common *const cm, const int mi_row,
                      const int mi_col, MODE_INFO **mi_8x8,
                      const int mode_info_stride, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_ss00(struct VP10Common *const cm,
+void av1_filter_block_plane_ss00(struct AV1Common *const cm,
                                   struct macroblockd_plane *const plane,
                                   int mi_row, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_ss11(struct VP10Common *const cm,
+void av1_filter_block_plane_ss11(struct AV1Common *const cm,
                                   struct macroblockd_plane *const plane,
                                   int mi_row, LOOP_FILTER_MASK *lfm);
 
-void vp10_filter_block_plane_non420(struct VP10Common *cm,
+void av1_filter_block_plane_non420(struct AV1Common *cm,
                                     struct macroblockd_plane *plane,
                                     MODE_INFO **mi_8x8, int mi_row, int mi_col);
 
-void vp10_loop_filter_init(struct VP10Common *cm);
+void av1_loop_filter_init(struct AV1Common *cm);
 
 // Update the loop filter for the current frame.
-// This should be called before vp10_loop_filter_rows(),
-// vp10_loop_filter_frame()
+// This should be called before av1_loop_filter_rows();
+// av1_loop_filter_frame()
 // calls this function directly.
-void vp10_loop_filter_frame_init(struct VP10Common *cm, int default_filt_lvl);
+void av1_loop_filter_frame_init(struct AV1Common *cm, int default_filt_lvl);
 
-void vp10_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
+void av1_loop_filter_frame(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
                             struct macroblockd *mbd, int filter_level,
                             int y_only, int partial_frame);
 
 // Apply the loop filter to [start, stop) macro block rows in frame_buffer.
-void vp10_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
-                           struct VP10Common *cm,
+void av1_loop_filter_rows(YV12_BUFFER_CONFIG *frame_buffer,
+                           struct AV1Common *cm,
                            struct macroblockd_plane planes[MAX_MB_PLANE],
                            int start, int stop, int y_only);
 
 typedef struct LoopFilterWorkerData {
   YV12_BUFFER_CONFIG *frame_buffer;
-  struct VP10Common *cm;
+  struct AV1Common *cm;
   struct macroblockd_plane planes[MAX_MB_PLANE];
 
   int start;
@@ -142,14 +142,14 @@ typedef struct LoopFilterWorkerData {
   int y_only;
 } LFWorkerData;
 
-void vp10_loop_filter_data_reset(
+void av1_loop_filter_data_reset(
     LFWorkerData *lf_data, YV12_BUFFER_CONFIG *frame_buffer,
-    struct VP10Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
+    struct AV1Common *cm, const struct macroblockd_plane planes[MAX_MB_PLANE]);
 
 // Operates on the rows described by 'lf_data'.
-int vp10_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
+int av1_loop_filter_worker(LFWorkerData *const lf_data, void *unused);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_LOOPFILTER_H_
+#endif  // AV1_COMMON_LOOPFILTER_H_
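
A similarly hedged sketch of the worker interface declared above (not part of
the patch): av1_loop_filter_data_reset() fills in the frame buffer, codec
context and plane state, after which the caller sets the row range before
invoking the worker.

/* Sketch only: filter the mi rows [start_mi_row, stop_mi_row) through
 * LFWorkerData, the way a threaded caller would drive av1_loop_filter_rows(). */
static void filter_rows_via_worker(AV1_COMMON *cm, MACROBLOCKD *xd,
                                   YV12_BUFFER_CONFIG *frame,
                                   int start_mi_row, int stop_mi_row) {
  LFWorkerData lf_data;
  av1_loop_filter_data_reset(&lf_data, frame, cm, xd->plane);
  lf_data.start = start_mi_row;
  lf_data.stop = stop_mi_row;
  lf_data.y_only = 0;                      /* filter chroma planes as well */
  av1_loop_filter_worker(&lf_data, NULL);  /* runs av1_loop_filter_rows() */
}
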
diff --git a/av1/common/mips/dspr2/itrans16_dspr2.c b/av1/common/mips/dspr2/itrans16_dspr2.c
index e9db822e36d9175c170cad83082dbe97052e2bb2..aaf3972efdbe5a8a700ad0811b024b9ffa96c7d6 100644
--- a/av1/common/mips/dspr2/itrans16_dspr2.c
+++ b/av1/common/mips/dspr2/itrans16_dspr2.c
@@ -22,7 +22,7 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
+void av1_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
                                  int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
@@ -91,7 +91,7 @@ void vp10_iht16x16_256_add_dspr2(const int16_t *input, uint8_t *dest, int pitch,
                                            dest[j * pitch + i]);
       }
     } break;
-    default: printf("vp10_short_iht16x16_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_iht16x16_256_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans4_dspr2.c b/av1/common/mips/dspr2/itrans4_dspr2.c
index ee8f566464a8864122876e297994f7d222c26d7e..a49db1fc3cb486720ebc4961982e0979be5dca82 100644
--- a/av1/common/mips/dspr2/itrans4_dspr2.c
+++ b/av1/common/mips/dspr2/itrans4_dspr2.c
@@ -22,7 +22,7 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
+void av1_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
                               int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[4 * 4]);
@@ -85,7 +85,7 @@ void vp10_iht4x4_16_add_dspr2(const int16_t *input, uint8_t *dest,
               ROUND_POWER_OF_TWO(temp_out[j], 4) + dest[j * dest_stride + i]);
       }
       break;
-    default: printf("vp10_short_iht4x4_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_iht4x4_16_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/dspr2/itrans8_dspr2.c b/av1/common/mips/dspr2/itrans8_dspr2.c
index 0c5a708a662aaaf5a6734de23c13a0153eb22607..1828bbcd952b9e871423212c40bcf6a5a832a4a3 100644
--- a/av1/common/mips/dspr2/itrans8_dspr2.c
+++ b/av1/common/mips/dspr2/itrans8_dspr2.c
@@ -21,7 +21,7 @@
 #include "aom_ports/mem.h"
 
 #if HAVE_DSPR2
-void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
+void av1_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
                               int dest_stride, int tx_type) {
   int i, j;
   DECLARE_ALIGNED(32, int16_t, out[8 * 8]);
@@ -79,7 +79,7 @@ void vp10_iht8x8_64_add_dspr2(const int16_t *input, uint8_t *dest,
               ROUND_POWER_OF_TWO(temp_out[j], 5) + dest[j * dest_stride + i]);
       }
       break;
-    default: printf("vp10_short_iht8x8_add_dspr2 : Invalid tx_type\n"); break;
+    default: printf("av1_iht8x8_64_add_dspr2 : Invalid tx_type\n"); break;
   }
 }
 #endif  // #if HAVE_DSPR2
diff --git a/av1/common/mips/msa/idct16x16_msa.c b/av1/common/mips/msa/idct16x16_msa.c
index 9b75b310b6e4d31a0bbc83d8b6ea6d012d9484a7..54f384172f7f39b497963fca715c6242f0ef5252 100644
--- a/av1/common/mips/msa/idct16x16_msa.c
+++ b/av1/common/mips/msa/idct16x16_msa.c
@@ -14,7 +14,7 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
+void av1_iht16x16_256_add_msa(const int16_t *input, uint8_t *dst,
                                int32_t dst_stride, int32_t tx_type) {
   int32_t i;
   DECLARE_ALIGNED(32, int16_t, out[16 * 16]);
diff --git a/av1/common/mips/msa/idct4x4_msa.c b/av1/common/mips/msa/idct4x4_msa.c
index b6269bb17c6cd9580d468529e3d52382499a22ff..8ad1ba8c6bbfd0fb88961ba79be7e636d2d288c1 100644
--- a/av1/common/mips/msa/idct4x4_msa.c
+++ b/av1/common/mips/msa/idct4x4_msa.c
@@ -14,7 +14,7 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
+void av1_iht4x4_16_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
diff --git a/av1/common/mips/msa/idct8x8_msa.c b/av1/common/mips/msa/idct8x8_msa.c
index 110ce71f86aa3efed7e012bc345d5bf92e69f1e0..8caf8fc66a2bab03b0fda148064e8580ff1e3fb9 100644
--- a/av1/common/mips/msa/idct8x8_msa.c
+++ b/av1/common/mips/msa/idct8x8_msa.c
@@ -14,7 +14,7 @@
 #include "av1/common/enums.h"
 #include "aom_dsp/mips/inv_txfm_msa.h"
 
-void vp10_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
+void av1_iht8x8_64_add_msa(const int16_t *input, uint8_t *dst,
                             int32_t dst_stride, int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
diff --git a/av1/common/mv.h b/av1/common/mv.h
index 06261afdb4ed07e557fe2d54dc26970a0d38e9e8..445d5d4df91f4e315c8960c5c626c23433fb95f6 100644
--- a/av1/common/mv.h
+++ b/av1/common/mv.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_MV_H_
-#define VP10_COMMON_MV_H_
+#ifndef AV1_COMMON_MV_H_
+#define AV1_COMMON_MV_H_
 
 #include "aom/aom_integer.h"
 
@@ -53,4 +53,4 @@ static INLINE void clamp_mv(MV *mv, int min_col, int max_col, int min_row,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_MV_H_
+#endif  // AV1_COMMON_MV_H_
diff --git a/av1/common/mvref_common.c b/av1/common/mvref_common.c
index 7ce74c6b70f1c9753c35040fcd567773ba0cce8f..752d03efb842034bff1f9442cf9db4db1b1588a3 100644
--- a/av1/common/mvref_common.c
+++ b/av1/common/mvref_common.c
@@ -12,7 +12,7 @@
 
 // This function searches the neighbourhood of a given MB/SB
 // to try and find candidate reference vectors.
-static void find_mv_refs_idx(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void find_mv_refs_idx(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                              int_mv *mv_ref_list, int block, int mi_row,
                              int mi_col, find_mv_refs_sync sync,
@@ -161,7 +161,7 @@ Done:
 #endif
 }
 
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                        MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                        int_mv *mv_ref_list, int mi_row, int mi_col,
                        find_mv_refs_sync sync, void *const data,
@@ -171,14 +171,14 @@ void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
 }
 
 static void lower_mv_precision(MV *mv, int allow_hp) {
-  const int use_hp = allow_hp && vp10_use_mv_hp(mv);
+  const int use_hp = allow_hp && av1_use_mv_hp(mv);
   if (!use_hp) {
     if (mv->row & 1) mv->row += (mv->row > 0 ? -1 : 1);
     if (mv->col & 1) mv->col += (mv->col > 0 ? -1 : 1);
   }
 }
 
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
                             int_mv *near_mv) {
   int i;
   // Make sure all the candidates are properly clamped etc
@@ -189,7 +189,7 @@ void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
   *near_mv = mvlist[1];
 }
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
                                     int ref, int mi_row, int mi_col,
                                     int_mv *nearest_mv, int_mv *near_mv,
                                     uint8_t *mode_context) {
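
As a worked illustration of the rounding in lower_mv_precision() above (a
sketch, not part of the patch): when high-precision motion vectors are not in
use, each odd component is moved one step toward zero so that it becomes even.

#include <stdint.h>

/* Per-component rounding equivalent to the !use_hp branch above. */
static int16_t lower_component_precision(int16_t v) {
  if (v & 1) v += (v > 0 ? -1 : 1);
  return v;  /* examples: 5 -> 4, -7 -> -6, 4 stays 4 */
}
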
diff --git a/av1/common/mvref_common.h b/av1/common/mvref_common.h
index 66cc48abd22ccd9f4b53d5e10b768e8a6e21dbc8..014a53dc15905e96f90cc3e4d367137ddad8865a 100644
--- a/av1/common/mvref_common.h
+++ b/av1/common/mvref_common.h
@@ -8,8 +8,8 @@
  * Media Patent License 1.0 was not distributed with this source code in the
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
-#ifndef VP10_COMMON_MVREF_COMMON_H_
-#define VP10_COMMON_MVREF_COMMON_H_
+#ifndef AV1_COMMON_MVREF_COMMON_H_
+#define AV1_COMMON_MVREF_COMMON_H_
 
 #include "av1/common/onyxc_int.h"
 #include "av1/common/blockd.h"
@@ -303,7 +303,7 @@ static INLINE int is_inside(const TileInfo *const tile, int mi_col, int mi_row,
 }
 
 typedef void (*find_mv_refs_sync)(void *const data, int mi_row);
-void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+void av1_find_mv_refs(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                        MODE_INFO *mi, MV_REFERENCE_FRAME ref_frame,
                        int_mv *mv_ref_list, int mi_row, int mi_col,
                        find_mv_refs_sync sync, void *const data,
@@ -312,10 +312,10 @@ void vp10_find_mv_refs(const VP10_COMMON *cm, const MACROBLOCKD *xd,
 // Check a list of motion vectors by SAD score, using a number of rows of
 // pixels above and a number of columns of pixels to the left, to select the
 // one with the best score to use as the reference motion vector.
-void vp10_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
+void av1_find_best_ref_mvs(int allow_hp, int_mv *mvlist, int_mv *nearest_mv,
                             int_mv *near_mv);
 
-void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
+void av1_append_sub8x8_mvs_for_idx(AV1_COMMON *cm, MACROBLOCKD *xd, int block,
                                     int ref, int mi_row, int mi_col,
                                     int_mv *nearest_mv, int_mv *near_mv,
                                     uint8_t *mode_context);
@@ -324,4 +324,4 @@ void vp10_append_sub8x8_mvs_for_idx(VP10_COMMON *cm, MACROBLOCKD *xd, int block,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_MVREF_COMMON_H_
+#endif  // AV1_COMMON_MVREF_COMMON_H_
diff --git a/av1/common/odintrin.h b/av1/common/odintrin.h
index c3a4033fed3abbdb801f5bb786ec63b8e9cc4737..47f17bf93642bc81e7c48a0e83a1bf32a24aae9b 100644
--- a/av1/common/odintrin.h
+++ b/av1/common/odintrin.h
@@ -8,8 +8,8 @@
  * Media Patent License 1.0 was not distributed with this source code in the
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
-#ifndef VP10_COMMON_ODINTRIN_H_
-#define VP10_COMMON_ODINTRIN_H_
+#ifndef AV1_COMMON_ODINTRIN_H_
+#define AV1_COMMON_ODINTRIN_H_
 
 #include "av1/common/enums.h"
 #include "aom/aom_integer.h"
diff --git a/av1/common/onyxc_int.h b/av1/common/onyxc_int.h
index 0b26d1605301ed2d0e18b34117ae5d8feae78444..83fead3a901ee0409348bb9bf6661c8cad26a1e7 100644
--- a/av1/common/onyxc_int.h
+++ b/av1/common/onyxc_int.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_ONYXC_INT_H_
-#define VP10_COMMON_ONYXC_INT_H_
+#ifndef AV1_COMMON_ONYXC_INT_H_
+#define AV1_COMMON_ONYXC_INT_H_
 
 #include "./aom_config.h"
 #include "aom/internal/aom_codec_internal.h"
@@ -122,7 +122,7 @@ typedef struct BufferPool {
   InternalFrameBufferList int_frame_buffers;
 } BufferPool;
 
-typedef struct VP10Common {
+typedef struct AV1Common {
   struct aom_internal_error_info error;
   aom_color_space_t color_space;
   int color_range;
@@ -139,7 +139,7 @@ typedef struct VP10Common {
   int subsampling_x;
   int subsampling_y;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int use_highbitdepth;  // Marks if we need to use 16bit frame buffers.
 #endif
 
@@ -229,9 +229,9 @@ typedef struct VP10Common {
   MODE_INFO *prev_mi;  /* 'mi' from last frame (points into prev_mip) */
 
   // Separate mi functions between encoder and decoder.
-  int (*alloc_mi)(struct VP10Common *cm, int mi_size);
-  void (*free_mi)(struct VP10Common *cm);
-  void (*setup_mi)(struct VP10Common *cm);
+  int (*alloc_mi)(struct AV1Common *cm, int mi_size);
+  void (*free_mi)(struct AV1Common *cm);
+  void (*setup_mi)(struct AV1Common *cm);
 
   // Grid of pointers to 8x8 MODE_INFO structs.  Any 8x8 not in the visible
   // area will be NULL.
@@ -316,7 +316,7 @@ typedef struct VP10Common {
 #if CONFIG_DERING
   int dering_level;
 #endif
-} VP10_COMMON;
+} AV1_COMMON;
 
 // TODO(hkuang): Don't need to lock the whole pool after implementing atomic
 // frame reference count.
@@ -336,18 +336,18 @@ static void unlock_buffer_pool(BufferPool *const pool) {
 #endif
 }
 
-static INLINE YV12_BUFFER_CONFIG *get_ref_frame(VP10_COMMON *cm, int index) {
+static INLINE YV12_BUFFER_CONFIG *get_ref_frame(AV1_COMMON *cm, int index) {
   if (index < 0 || index >= REF_FRAMES) return NULL;
   if (cm->ref_frame_map[index] < 0) return NULL;
   assert(cm->ref_frame_map[index] < FRAME_BUFFERS);
   return &cm->buffer_pool->frame_bufs[cm->ref_frame_map[index]].buf;
 }
 
-static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(VP10_COMMON *cm) {
+static INLINE YV12_BUFFER_CONFIG *get_frame_new_buffer(AV1_COMMON *cm) {
   return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
 }
 
-static INLINE int get_free_fb(VP10_COMMON *cm) {
+static INLINE int get_free_fb(AV1_COMMON *cm) {
   RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
   int i;
 
@@ -381,11 +381,11 @@ static INLINE int mi_cols_aligned_to_sb(int n_mis) {
   return ALIGN_POWER_OF_TWO(n_mis, MI_BLOCK_SIZE_LOG2);
 }
 
-static INLINE int frame_is_intra_only(const VP10_COMMON *const cm) {
+static INLINE int frame_is_intra_only(const AV1_COMMON *const cm) {
   return cm->frame_type == KEY_FRAME || cm->intra_only;
 }
 
-static INLINE void vp10_init_macroblockd(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void av1_init_macroblockd(AV1_COMMON *cm, MACROBLOCKD *xd,
                                          tran_low_t *dqcoeff) {
   int i;
 
@@ -461,13 +461,13 @@ static INLINE void set_mi_row_col(MACROBLOCKD *xd, const TileInfo *const tile,
   }
 }
 
-static INLINE const aom_prob *get_y_mode_probs(const VP10_COMMON *cm,
+static INLINE const aom_prob *get_y_mode_probs(const AV1_COMMON *cm,
                                                const MODE_INFO *mi,
                                                const MODE_INFO *above_mi,
                                                const MODE_INFO *left_mi,
                                                int block) {
-  const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, block);
-  const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, block);
+  const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, block);
+  const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, block);
   return cm->kf_y_prob[above][left];
 }
 
@@ -504,4 +504,4 @@ static INLINE int partition_plane_context(const MACROBLOCKD *xd, int mi_row,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_ONYXC_INT_H_
+#endif  // AV1_COMMON_ONYXC_INT_H_
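
A small usage sketch for the renamed accessors above (not part of the patch;
it assumes av1/common/onyxc_int.h is in scope): get_ref_frame() returns NULL
both for out-of-range indices and for unmapped slots, so a caller can probe
the reference map directly.

/* Count how many of the REF_FRAMES reference slots currently map to a
 * frame buffer. */
static int count_mapped_ref_frames(AV1_COMMON *cm) {
  int index, count = 0;
  for (index = 0; index < REF_FRAMES; ++index)
    if (get_ref_frame(cm, index) != NULL) ++count;
  return count;
}
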
diff --git a/av1/common/pred_common.c b/av1/common/pred_common.c
index 590324b1e62d654998cb388dcde166f0db799def..508dac5cee137bb7e01c7c4c2ba4acca07d3b14a 100644
--- a/av1/common/pred_common.c
+++ b/av1/common/pred_common.c
@@ -13,7 +13,7 @@
 #include "av1/common/seg_common.h"
 
 // Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
   // Note:
   // The mode info data structure has a one element border above and to the
   // left of the entries corresponding to real macroblocks.
@@ -44,7 +44,7 @@ int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd) {
 // 1 - intra/inter, inter/intra
 // 2 - intra/--, --/intra
 // 3 - intra/intra
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
+int av1_get_intra_inter_context(const MACROBLOCKD *xd) {
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
   const int has_above = xd->up_available;
@@ -61,7 +61,7 @@ int vp10_get_intra_inter_context(const MACROBLOCKD *xd) {
   }
 }
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
                                     const MACROBLOCKD *xd) {
   int ctx;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
@@ -104,7 +104,7 @@ int vp10_get_reference_mode_context(const VP10_COMMON *cm,
 }
 
 // Returns a context number for the given MB prediction signal
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
                                      const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
@@ -186,7 +186,7 @@ int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
   return pred_context;
 }
 
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
@@ -252,7 +252,7 @@ int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd) {
   return pred_context;
 }
 
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd) {
   int pred_context;
   const MB_MODE_INFO *const above_mbmi = xd->above_mbmi;
   const MB_MODE_INFO *const left_mbmi = xd->left_mbmi;
diff --git a/av1/common/pred_common.h b/av1/common/pred_common.h
index 56691ee65614781910614e4ad8aeebfc522d1b16..a07e3f2a18a73c64de5c4f6b0bbf444603d10c79 100644
--- a/av1/common/pred_common.h
+++ b/av1/common/pred_common.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_PRED_COMMON_H_
-#define VP10_COMMON_PRED_COMMON_H_
+#ifndef AV1_COMMON_PRED_COMMON_H_
+#define AV1_COMMON_PRED_COMMON_H_
 
 #include "av1/common/blockd.h"
 #include "av1/common/onyxc_int.h"
@@ -20,7 +20,7 @@
 extern "C" {
 #endif
 
-static INLINE int get_segment_id(const VP10_COMMON *cm,
+static INLINE int get_segment_id(const AV1_COMMON *cm,
                                  const uint8_t *segment_ids, BLOCK_SIZE bsize,
                                  int mi_row, int mi_col) {
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
@@ -39,7 +39,7 @@ static INLINE int get_segment_id(const VP10_COMMON *cm,
   return segment_id;
 }
 
-static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
+static INLINE int av1_get_pred_context_seg_id(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
   const int above_sip =
@@ -49,12 +49,12 @@ static INLINE int vp10_get_pred_context_seg_id(const MACROBLOCKD *xd) {
   return above_sip + left_sip;
 }
 
-static INLINE aom_prob vp10_get_pred_prob_seg_id(
+static INLINE aom_prob av1_get_pred_prob_seg_id(
     const struct segmentation_probs *segp, const MACROBLOCKD *xd) {
-  return segp->pred_probs[vp10_get_pred_context_seg_id(xd)];
+  return segp->pred_probs[av1_get_pred_context_seg_id(xd)];
 }
 
-static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
+static INLINE int av1_get_skip_context(const MACROBLOCKD *xd) {
   const MODE_INFO *const above_mi = xd->above_mi;
   const MODE_INFO *const left_mi = xd->left_mi;
   const int above_skip = (above_mi != NULL) ? above_mi->mbmi.skip : 0;
@@ -62,49 +62,49 @@ static INLINE int vp10_get_skip_context(const MACROBLOCKD *xd) {
   return above_skip + left_skip;
 }
 
-static INLINE aom_prob vp10_get_skip_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_skip_prob(const AV1_COMMON *cm,
                                           const MACROBLOCKD *xd) {
-  return cm->fc->skip_probs[vp10_get_skip_context(xd)];
+  return cm->fc->skip_probs[av1_get_skip_context(xd)];
 }
 
-int vp10_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
+int av1_get_pred_context_switchable_interp(const MACROBLOCKD *xd);
 
-int vp10_get_intra_inter_context(const MACROBLOCKD *xd);
+int av1_get_intra_inter_context(const MACROBLOCKD *xd);
 
-static INLINE aom_prob vp10_get_intra_inter_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_intra_inter_prob(const AV1_COMMON *cm,
                                                  const MACROBLOCKD *xd) {
-  return cm->fc->intra_inter_prob[vp10_get_intra_inter_context(xd)];
+  return cm->fc->intra_inter_prob[av1_get_intra_inter_context(xd)];
 }
 
-int vp10_get_reference_mode_context(const VP10_COMMON *cm,
+int av1_get_reference_mode_context(const AV1_COMMON *cm,
                                     const MACROBLOCKD *xd);
 
-static INLINE aom_prob vp10_get_reference_mode_prob(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_reference_mode_prob(const AV1_COMMON *cm,
                                                     const MACROBLOCKD *xd) {
-  return cm->fc->comp_inter_prob[vp10_get_reference_mode_context(cm, xd)];
+  return cm->fc->comp_inter_prob[av1_get_reference_mode_context(cm, xd)];
 }
 
-int vp10_get_pred_context_comp_ref_p(const VP10_COMMON *cm,
+int av1_get_pred_context_comp_ref_p(const AV1_COMMON *cm,
                                      const MACROBLOCKD *xd);
 
-static INLINE aom_prob vp10_get_pred_prob_comp_ref_p(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_comp_ref_p(const AV1_COMMON *cm,
                                                      const MACROBLOCKD *xd) {
-  const int pred_context = vp10_get_pred_context_comp_ref_p(cm, xd);
+  const int pred_context = av1_get_pred_context_comp_ref_p(cm, xd);
   return cm->fc->comp_ref_prob[pred_context];
 }
 
-int vp10_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p1(const MACROBLOCKD *xd);
 
-static INLINE aom_prob vp10_get_pred_prob_single_ref_p1(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_single_ref_p1(const AV1_COMMON *cm,
                                                         const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p1(xd)][0];
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p1(xd)][0];
 }
 
-int vp10_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
+int av1_get_pred_context_single_ref_p2(const MACROBLOCKD *xd);
 
-static INLINE aom_prob vp10_get_pred_prob_single_ref_p2(const VP10_COMMON *cm,
+static INLINE aom_prob av1_get_pred_prob_single_ref_p2(const AV1_COMMON *cm,
                                                         const MACROBLOCKD *xd) {
-  return cm->fc->single_ref_prob[vp10_get_pred_context_single_ref_p2(xd)][1];
+  return cm->fc->single_ref_prob[av1_get_pred_context_single_ref_p2(xd)][1];
 }
 
 // Returns a context number for the given MB prediction signal
@@ -158,4 +158,4 @@ static INLINE unsigned int *get_tx_counts(TX_SIZE max_tx_size, int ctx,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_PRED_COMMON_H_
+#endif  // AV1_COMMON_PRED_COMMON_H_
diff --git a/av1/common/quant_common.c b/av1/common/quant_common.c
index 7859052cc5f07c64928a7902fdce0b4df4fe69ab..2a5a9467a95af280d083dc6c4d64ab1f9836eef4 100644
--- a/av1/common/quant_common.c
+++ b/av1/common/quant_common.c
@@ -41,7 +41,7 @@ static const int16_t dc_qlookup[QINDEX_RANGE] = {
   1184, 1232, 1282, 1336,
 };
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const int16_t dc_qlookup_10[QINDEX_RANGE] = {
   4,    9,    10,   13,   15,   17,   20,   22,   25,   28,   31,   34,   37,
   40,   43,   47,   50,   53,   57,   60,   64,   68,   71,   75,   78,   82,
@@ -116,7 +116,7 @@ static const int16_t ac_qlookup[QINDEX_RANGE] = {
   1567, 1597, 1628, 1660, 1692, 1725, 1759, 1793, 1828,
 };
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const int16_t ac_qlookup_10[QINDEX_RANGE] = {
   4,    9,    11,   13,   16,   18,   21,   24,   27,   30,   33,   37,   40,
   44,   48,   51,   55,   59,   63,   67,   71,   75,   79,   83,   88,   92,
@@ -168,8 +168,8 @@ static const int16_t ac_qlookup_12[QINDEX_RANGE] = {
 };
 #endif
 
-int16_t vp10_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
-#if CONFIG_VPX_HIGHBITDEPTH
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
     case VPX_BITS_8: return dc_qlookup[clamp(qindex + delta, 0, MAXQ)];
     case VPX_BITS_10: return dc_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
@@ -184,8 +184,8 @@ int16_t vp10_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
 #endif
 }
 
-int16_t vp10_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
-#if CONFIG_VPX_HIGHBITDEPTH
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
     case VPX_BITS_8: return ac_qlookup[clamp(qindex + delta, 0, MAXQ)];
     case VPX_BITS_10: return ac_qlookup_10[clamp(qindex + delta, 0, MAXQ)];
@@ -200,7 +200,7 @@ int16_t vp10_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth) {
 #endif
 }
 
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
                     int base_qindex) {
   if (segfeature_active(seg, segment_id, SEG_LVL_ALT_Q)) {
     const int data = get_segdata(seg, segment_id, SEG_LVL_ALT_Q);
@@ -213,11 +213,11 @@ int vp10_get_qindex(const struct segmentation *seg, int segment_id,
 }
 
 #if CONFIG_AOM_QM
-qm_val_t* aom_iqmatrix(VP10_COMMON* cm, int qmlevel, int is_chroma,
+qm_val_t* aom_iqmatrix(AV1_COMMON* cm, int qmlevel, int is_chroma,
                        int log2sizem2, int is_intra) {
   return &cm->giqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
 }
-qm_val_t* aom_qmatrix(VP10_COMMON* cm, int qmlevel, int is_chroma,
+qm_val_t* aom_qmatrix(AV1_COMMON* cm, int qmlevel, int is_chroma,
                       int log2sizem2, int is_intra) {
   return &cm->gqmatrix[qmlevel][!!is_chroma][!!is_intra][log2sizem2][0];
 }
@@ -227,7 +227,7 @@ static uint16_t
 static uint16_t
     wt_matrix_ref[NUM_QM_LEVELS][2][2][4 * 4 + 8 * 8 + 16 * 16 + 32 * 32];
 
-void aom_qm_init(VP10_COMMON* cm) {
+void aom_qm_init(AV1_COMMON* cm) {
   int q, c, f, t, size;
   int current;
   for (q = 0; q < NUM_QM_LEVELS; ++q) {
diff --git a/av1/common/quant_common.h b/av1/common/quant_common.h
index c806c7aa07e631f06e5826cf4135fdaba4d6eb34..e6bb62e0affdfc0088d622688dac9b2612e92eb1 100644
--- a/av1/common/quant_common.h
+++ b/av1/common/quant_common.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_QUANT_COMMON_H_
-#define VP10_COMMON_QUANT_COMMON_H_
+#ifndef AV1_COMMON_QUANT_COMMON_H_
+#define AV1_COMMON_QUANT_COMMON_H_
 
 #include "aom/aom_codec.h"
 #include "av1/common/seg_common.h"
@@ -35,12 +35,12 @@ extern "C" {
 #define DEFAULT_QM_LAST (NUM_QM_LEVELS - 1)
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
-int16_t vp10_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
-int16_t vp10_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_dc_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
+int16_t av1_ac_quant(int qindex, int delta, aom_bit_depth_t bit_depth);
 
-int vp10_get_qindex(const struct segmentation *seg, int segment_id,
+int av1_get_qindex(const struct segmentation *seg, int segment_id,
                     int base_qindex);
 #if CONFIG_AOM_QM
 // Reduce the large number of quantizers to a smaller number of levels for which
@@ -50,10 +50,10 @@ static inline int aom_get_qmlevel(int qindex, int first, int last) {
   qmlevel = VPXMIN(qmlevel + first, NUM_QM_LEVELS - 1);
   return qmlevel;
 }
-void aom_qm_init(struct VP10Common *cm);
-qm_val_t *aom_iqmatrix(struct VP10Common *cm, int qindex, int comp,
+void aom_qm_init(struct AV1Common *cm);
+qm_val_t *aom_iqmatrix(struct AV1Common *cm, int qindex, int comp,
                        int log2sizem2, int is_intra);
-qm_val_t *aom_qmatrix(struct VP10Common *cm, int qindex, int comp,
+qm_val_t *aom_qmatrix(struct AV1Common *cm, int qindex, int comp,
                       int log2sizem2, int is_intra);
 #endif
 
@@ -61,4 +61,4 @@ qm_val_t *aom_qmatrix(struct VP10Common *cm, int qindex, int comp,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_QUANT_COMMON_H_
+#endif  // AV1_COMMON_QUANT_COMMON_H_
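
A hedged usage sketch of the quantizer helpers declared above (not part of the
patch; the delta values and the 8-bit depth are illustrative, and
av1/common/quant_common.h is assumed to be in scope):

/* Map a segment to its effective DC/AC quantizer step sizes: first resolve
 * the segment's qindex, then look up the step for the requested bit depth. */
static void quant_steps_for_segment(const struct segmentation *seg,
                                    int segment_id, int base_qindex,
                                    int dc_delta_q,
                                    int16_t *dc_step, int16_t *ac_step) {
  const int qindex = av1_get_qindex(seg, segment_id, base_qindex);
  *dc_step = av1_dc_quant(qindex, dc_delta_q, VPX_BITS_8);
  *ac_step = av1_ac_quant(qindex, 0 /* no AC delta */, VPX_BITS_8);
}
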
diff --git a/av1/common/reconinter.c b/av1/common/reconinter.c
index ee43ffe69ab421b5a6820ad1f530bc268502c784..4a9a98a38800e0c7d3499e29fbc9ad8bb972b8bc 100644
--- a/av1/common/reconinter.c
+++ b/av1/common/reconinter.c
@@ -20,8 +20,8 @@
 #include "av1/common/reconinter.h"
 #include "av1/common/reconintra.h"
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const MV *src_mv, const struct scale_factors *sf, int w, int h, int ref,
     const InterpKernel *kernel, enum mv_precision precision, int x, int y,
@@ -29,7 +29,7 @@ void vp10_highbd_build_inter_predictor(
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+  MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;
 
@@ -38,9 +38,9 @@ void vp10_highbd_build_inter_predictor(
   high_inter_predictor(src, src_stride, dst, dst_stride, subpel_x, subpel_y, sf,
                        w, h, ref, kernel, sf->x_step_q4, sf->y_step_q4, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride, const MV *src_mv,
                                 const struct scale_factors *sf, int w, int h,
                                 int ref, const InterpKernel *kernel,
@@ -48,7 +48,7 @@ void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
   const int is_q4 = precision == MV_PRECISION_Q4;
   const MV mv_q4 = { is_q4 ? src_mv->row : src_mv->row * 2,
                      is_q4 ? src_mv->col : src_mv->col * 2 };
-  MV32 mv = vp10_scale_mv(&mv_q4, x, y, sf);
+  MV32 mv = av1_scale_mv(&mv_q4, x, y, sf);
   const int subpel_x = mv.col & SUBPEL_MASK;
   const int subpel_y = mv.row & SUBPEL_MASK;
 
@@ -64,7 +64,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const MODE_INFO *mi = xd->mi[0];
   const int is_compound = has_second_ref(&mi->mbmi);
-  const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
+  const InterpKernel *kernel = av1_filter_kernels[mi->mbmi.interp_filter];
   int ref;
 
   for (ref = 0; ref < 1 + is_compound; ++ref) {
@@ -87,11 +87,11 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
     uint8_t *pre;
     MV32 scaled_mv;
     int xs, ys, subpel_x, subpel_y;
-    const int is_scaled = vp10_is_scaled(sf);
+    const int is_scaled = av1_is_scaled(sf);
 
     if (is_scaled) {
       pre = pre_buf->buf + scaled_buffer_offset(x, y, pre_buf->stride, sf);
-      scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+      scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
       xs = sf->x_step_q4;
       ys = sf->y_step_q4;
     } else {
@@ -105,7 +105,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
     pre += (scaled_mv.row >> SUBPEL_BITS) * pre_buf->stride +
            (scaled_mv.col >> SUBPEL_BITS);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       high_inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
                            subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
@@ -116,11 +116,11 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
 #else
     inter_predictor(pre, pre_buf->stride, dst, dst_buf->stride, subpel_x,
                     subpel_y, sf, w, h, ref, kernel, xs, ys);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 }
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
                                        int ir, int ic, int mi_row, int mi_col) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
   MODE_INFO *const mi = xd->mi[0];
@@ -131,32 +131,32 @@ void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
   uint8_t *const dst = &pd->dst.buf[(ir * pd->dst.stride + ic) << 2];
   int ref;
   const int is_compound = has_second_ref(&mi->mbmi);
-  const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
+  const InterpKernel *kernel = av1_filter_kernels[mi->mbmi.interp_filter];
 
   for (ref = 0; ref < 1 + is_compound; ++ref) {
     const uint8_t *pre =
         &pd->pre[ref].buf[(ir * pd->pre[ref].stride + ic) << 2];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-      vp10_highbd_build_inter_predictor(
+      av1_highbd_build_inter_predictor(
           pre, pd->pre[ref].stride, dst, pd->dst.stride,
           &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
           ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
           mi_row * MI_SIZE + 4 * ir, xd->bd);
     } else {
-      vp10_build_inter_predictor(
+      av1_build_inter_predictor(
           pre, pd->pre[ref].stride, dst, pd->dst.stride,
           &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
           ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
           mi_row * MI_SIZE + 4 * ir);
     }
 #else
-    vp10_build_inter_predictor(
+    av1_build_inter_predictor(
         pre, pd->pre[ref].stride, dst, pd->dst.stride,
         &mi->bmi[i].as_mv[ref].as_mv, &xd->block_refs[ref]->sf, width, height,
         ref, kernel, MV_PRECISION_Q3, mi_col * MI_SIZE + 4 * ic,
         mi_row * MI_SIZE + 4 * ir);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 }
 
@@ -193,29 +193,29 @@ static void build_inter_predictors_for_planes(MACROBLOCKD *xd, BLOCK_SIZE bsize,
   }
 }
 
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                      BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0, 0);
 }
 
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
                                      BLOCK_SIZE bsize, int plane) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, plane, plane);
 }
 
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
                                       BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 1,
                                     MAX_MB_PLANE - 1);
 }
 
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize) {
   build_inter_predictors_for_planes(xd, bsize, mi_row, mi_col, 0,
                                     MAX_MB_PLANE - 1);
 }
 
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
                            const YV12_BUFFER_CONFIG *src, int mi_row,
                            int mi_col) {
   uint8_t *const buffers[MAX_MB_PLANE] = { src->y_buffer, src->u_buffer,
@@ -231,7 +231,7 @@ void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
   }
 }
 
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
                            const YV12_BUFFER_CONFIG *src, int mi_row,
                            int mi_col, const struct scale_factors *sf) {
   if (src != NULL) {
diff --git a/av1/common/reconinter.h b/av1/common/reconinter.h
index 6996ea74436a42be1dac9cc3b5486965c36c4ad3..183ec30db3f93538767b4f271a07532a1d257463 100644
--- a/av1/common/reconinter.h
+++ b/av1/common/reconinter.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_RECONINTER_H_
-#define VP10_COMMON_RECONINTER_H_
+#ifndef AV1_COMMON_RECONINTER_H_
+#define AV1_COMMON_RECONINTER_H_
 
 #include "av1/common/filter.h"
 #include "av1/common/onyxc_int.h"
@@ -32,7 +32,7 @@ static INLINE void inter_predictor(const uint8_t *src, int src_stride,
       ys, w, h);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void high_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const int subpel_x, const int subpel_y, const struct scale_factors *sf,
@@ -41,7 +41,7 @@ static INLINE void high_inter_predictor(
       src, src_stride, dst, dst_stride, kernel[subpel_x], xs, kernel[subpel_y],
       ys, w, h, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static INLINE int round_mv_comp_q4(int value) {
   return (value < 0 ? value - 2 : value + 2) / 4;
@@ -113,29 +113,29 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane, int block, int bw,
                             int bh, int x, int y, int w, int h, int mi_x,
                             int mi_y);
 
-void vp10_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
+void av1_build_inter_predictor_sub8x8(MACROBLOCKD *xd, int plane, int i,
                                        int ir, int ic, int mi_row, int mi_col);
 
-void vp10_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sby(MACROBLOCKD *xd, int mi_row, int mi_col,
                                      BLOCK_SIZE bsize);
 
-void vp10_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbp(MACROBLOCKD *xd, int mi_row, int mi_col,
                                      BLOCK_SIZE bsize, int plane);
 
-void vp10_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sbuv(MACROBLOCKD *xd, int mi_row, int mi_col,
                                       BLOCK_SIZE bsize);
 
-void vp10_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
+void av1_build_inter_predictors_sb(MACROBLOCKD *xd, int mi_row, int mi_col,
                                     BLOCK_SIZE bsize);
 
-void vp10_build_inter_predictor(const uint8_t *src, int src_stride,
+void av1_build_inter_predictor(const uint8_t *src, int src_stride,
                                 uint8_t *dst, int dst_stride, const MV *mv_q3,
                                 const struct scale_factors *sf, int w, int h,
                                 int do_avg, const InterpKernel *kernel,
                                 enum mv_precision precision, int x, int y);
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_build_inter_predictor(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_build_inter_predictor(
     const uint8_t *src, int src_stride, uint8_t *dst, int dst_stride,
     const MV *mv_q3, const struct scale_factors *sf, int w, int h, int do_avg,
     const InterpKernel *kernel, enum mv_precision precision, int x, int y,
@@ -159,11 +159,11 @@ static INLINE void setup_pred_plane(struct buf_2d *dst, uint8_t *src,
   dst->stride = stride;
 }
 
-void vp10_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
+void av1_setup_dst_planes(struct macroblockd_plane planes[MAX_MB_PLANE],
                            const YV12_BUFFER_CONFIG *src, int mi_row,
                            int mi_col);
 
-void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
+void av1_setup_pre_planes(MACROBLOCKD *xd, int idx,
                            const YV12_BUFFER_CONFIG *src, int mi_row,
                            int mi_col, const struct scale_factors *sf);
 
@@ -171,4 +171,4 @@ void vp10_setup_pre_planes(MACROBLOCKD *xd, int idx,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RECONINTER_H_
+#endif  // AV1_COMMON_RECONINTER_H_
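
A worked check of round_mv_comp_q4() defined above (a sketch, not part of the
patch): the +2/-2 bias before the division by 4 gives round-to-nearest with
ties away from zero.

#include <assert.h>

static void round_mv_comp_q4_check(void) {
  assert(round_mv_comp_q4(6) == 2);    /*  1.5  rounds to  2 */
  assert(round_mv_comp_q4(5) == 1);    /*  1.25 rounds to  1 */
  assert(round_mv_comp_q4(-6) == -2);  /* -1.5  rounds to -2 */
  assert(round_mv_comp_q4(-5) == -1);  /* -1.25 rounds to -1 */
}
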
diff --git a/av1/common/reconintra.c b/av1/common/reconintra.c
index bebfb79902cf157f5def7fe372ce5ee24d48d247..d6e8605ce085fcd2f7f2f66ecf6772dd4ecf5397 100644
--- a/av1/common/reconintra.c
+++ b/av1/common/reconintra.c
@@ -12,9 +12,9 @@
 #include "./aom_config.h"
 #include "./aom_dsp_rtcd.h"
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #include "aom_dsp/aom_dsp_common.h"
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #include "aom_mem/aom_mem.h"
 #include "aom_ports/mem.h"
 #include "aom_ports/aom_once.h"
@@ -100,7 +100,7 @@ static const uint8_t *const orders[BLOCK_SIZES] = {
   orders_32x64, orders_64x32, orders_64x64,
 };
 
-static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
+static int av1_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
                           int right_available, TX_SIZE txsz, int y, int x,
                           int ss_x) {
   if (y == 0) {
@@ -133,7 +133,7 @@ static int vp10_has_right(BLOCK_SIZE bsize, int mi_row, int mi_col,
   }
 }
 
-static int vp10_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
+static int av1_has_bottom(BLOCK_SIZE bsize, int mi_row, int mi_col,
                            int bottom_available, TX_SIZE txsz, int y, int x,
                            int ss_y) {
   if (x == 0) {
@@ -171,15 +171,15 @@ typedef void (*intra_pred_fn)(uint8_t *dst, ptrdiff_t stride,
 static intra_pred_fn pred[INTRA_MODES][TX_SIZES];
 static intra_pred_fn dc_pred[2][2][TX_SIZES];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*intra_high_pred_fn)(uint16_t *dst, ptrdiff_t stride,
                                    const uint16_t *above, const uint16_t *left,
                                    int bd);
 static intra_high_pred_fn pred_high[INTRA_MODES][4];
 static intra_high_pred_fn dc_pred_high[2][2][4];
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void vp10_init_intra_predictors_internal(void) {
+static void av1_init_intra_predictors_internal(void) {
 #define INIT_NO_4X4(p, type)                  \
   p[TX_8X8] = aom_##type##_predictor_8x8;     \
   p[TX_16X16] = aom_##type##_predictor_16x16; \
@@ -210,7 +210,7 @@ static void vp10_init_intra_predictors_internal(void) {
   INIT_ALL_SIZES(dc_pred[1][0], dc_left);
   INIT_ALL_SIZES(dc_pred[1][1], dc);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   INIT_ALL_SIZES(pred_high[V_PRED], highbd_v);
   INIT_ALL_SIZES(pred_high[H_PRED], highbd_h);
 #if CONFIG_MISC_FIXES
@@ -231,7 +231,7 @@ static void vp10_init_intra_predictors_internal(void) {
   INIT_ALL_SIZES(dc_pred_high[0][1], highbd_dc_top);
   INIT_ALL_SIZES(dc_pred_high[1][0], highbd_dc_left);
   INIT_ALL_SIZES(dc_pred_high[1][1], highbd_dc);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #undef intra_pred_allsizes
 }
@@ -242,7 +242,7 @@ static INLINE void memset16(uint16_t *dst, int val, int n) {
 }
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void build_intra_predictors_high(const MACROBLOCKD *xd,
                                         const uint8_t *ref8, int ref_stride,
                                         uint8_t *dst8, int dst_stride,
@@ -465,7 +465,7 @@ static void build_intra_predictors_high(const MACROBLOCKD *xd,
                              xd->bd);
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
                                    int ref_stride, uint8_t *dst, int dst_stride,
@@ -675,7 +675,7 @@ static void build_intra_predictors(const MACROBLOCKD *xd, const uint8_t *ref,
   }
 }
 
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
                               TX_SIZE tx_size, PREDICTION_MODE mode,
                               const uint8_t *ref, int ref_stride, uint8_t *dst,
                               int dst_stride, int aoff, int loff, int plane) {
@@ -693,10 +693,10 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   const int right_available =
       mi_col + (bw >> !pd->subsampling_x) < xd->tile.mi_col_end;
-  const int have_right = vp10_has_right(bsize, mi_row, mi_col, right_available,
+  const int have_right = av1_has_right(bsize, mi_row, mi_col, right_available,
                                         tx_size, loff, aoff, pd->subsampling_x);
   const int have_bottom =
-      vp10_has_bottom(bsize, mi_row, mi_col, xd->mb_to_bottom_edge > 0, tx_size,
+      av1_has_bottom(bsize, mi_row, mi_col, xd->mb_to_bottom_edge > 0, tx_size,
                       loff, aoff, pd->subsampling_y);
   const int wpx = 4 * bw;
   const int hpx = 4 * bh;
@@ -711,7 +711,7 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
 #endif  // CONFIG_MISC_FIXES
 
 #if CONFIG_MISC_FIXES
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
                                 tx_size, have_top ? VPXMIN(txpx, xr + txpx) : 0,
@@ -730,7 +730,7 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
                          plane);
 #else  // CONFIG_MISC_FIXES
   (void)bhl_in;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     build_intra_predictors_high(xd, ref, ref_stride, dst, dst_stride, mode,
                                 tx_size, have_top, have_left, have_right, x, y,
@@ -743,6 +743,6 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
 #endif  // CONFIG_MISC_FIXES
 }
 
-void vp10_init_intra_predictors(void) {
-  once(vp10_init_intra_predictors_internal);
+void av1_init_intra_predictors(void) {
+  once(av1_init_intra_predictors_internal);
 }
diff --git a/av1/common/reconintra.h b/av1/common/reconintra.h
index 09d1d4b0097cc18cbb628240b38137f2f499e1a0..9a00b9bc3aa867c9d8597d3fbd7de29f2b419362 100644
--- a/av1/common/reconintra.h
+++ b/av1/common/reconintra.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_RECONINTRA_H_
-#define VP10_COMMON_RECONINTRA_H_
+#ifndef AV1_COMMON_RECONINTRA_H_
+#define AV1_COMMON_RECONINTRA_H_
 
 #include "aom/aom_integer.h"
 #include "av1/common/blockd.h"
@@ -19,9 +19,9 @@
 extern "C" {
 #endif
 
-void vp10_init_intra_predictors(void);
+void av1_init_intra_predictors(void);
 
-void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
+void av1_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
                               TX_SIZE tx_size, PREDICTION_MODE mode,
                               const uint8_t *ref, int ref_stride, uint8_t *dst,
                               int dst_stride, int aoff, int loff, int plane);
@@ -29,4 +29,4 @@ void vp10_predict_intra_block(const MACROBLOCKD *xd, int bwl_in, int bhl_in,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_RECONINTRA_H_
+#endif  // AV1_COMMON_RECONINTRA_H_
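
The reconintra.c hunks above keep the existing dispatch scheme under the new names: static tables of intra-predictor function pointers, filled once by av1_init_intra_predictors_internal() and indexed by prediction mode and transform size, with av1_init_intra_predictors() guarding the fill through once(). A minimal stand-alone sketch of that pattern follows; the table sizes, predictor bodies, and the init guard are illustrative only (the library serializes the init through its own once() helper).

#include <stdio.h>

#define NUM_MODES 2 /* stand-in for INTRA_MODES */
#define NUM_SIZES 2 /* stand-in for TX_SIZES */

typedef void (*pred_fn)(int *dst, int n);

static void dc_pred(int *dst, int n) {
  for (int i = 0; i < n; ++i) dst[i] = 128; /* flat DC fill */
}
static void v_pred(int *dst, int n) {
  for (int i = 0; i < n; ++i) dst[i] = i; /* placeholder "vertical" predictor */
}

static pred_fn pred[NUM_MODES][NUM_SIZES];

static void init_predictors_internal(void) {
  for (int s = 0; s < NUM_SIZES; ++s) {
    pred[0][s] = dc_pred;
    pred[1][s] = v_pred;
  }
}

static void init_predictors(void) {
  static int done; /* the real code runs the internal init through once() */
  if (!done) {
    init_predictors_internal();
    done = 1;
  }
}

int main(void) {
  int block[4];
  init_predictors();
  pred[1][0](block, 4); /* dispatch by (mode, transform size) */
  printf("%d %d %d %d\n", block[0], block[1], block[2], block[3]);
  return 0;
}
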
diff --git a/av1/common/scale.c b/av1/common/scale.c
index 75ae029255202cbc741a1ea7972e4c086a1e0564..b91c81d7601f634fcc8378fd0debff26b68135aa 100644
--- a/av1/common/scale.c
+++ b/av1/common/scale.c
@@ -35,7 +35,7 @@ static int get_fixed_point_scale_factor(int other_size, int this_size) {
   return (other_size << REF_SCALE_SHIFT) / this_size;
 }
 
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
   const int x_off_q4 = scaled_x(x << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const int y_off_q4 = scaled_y(y << SUBPEL_BITS, sf) & SUBPEL_MASK;
   const MV32 res = { scaled_y(mv->row, sf) + y_off_q4,
@@ -43,12 +43,12 @@ MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf) {
   return res;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
                                         int other_h, int this_w, int this_h,
                                         int use_highbd) {
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
                                         int other_h, int this_w, int this_h) {
 #endif
   if (!valid_ref_frame_size(other_w, other_h, this_w, this_h)) {
@@ -62,7 +62,7 @@ void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
   sf->x_step_q4 = scaled_x(16, sf);
   sf->y_step_q4 = scaled_y(16, sf);
 
-  if (vp10_is_scaled(sf)) {
+  if (av1_is_scaled(sf)) {
     sf->scale_value_x = scaled_x;
     sf->scale_value_y = scaled_y;
   } else {
@@ -116,7 +116,7 @@ void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
   // 2D subpel motion always gets filtered in both directions
   sf->predict[1][1][0] = aom_convolve8;
   sf->predict[1][1][1] = aom_convolve8_avg;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (use_highbd) {
     if (sf->x_step_q4 == 16) {
       if (sf->y_step_q4 == 16) {
diff --git a/av1/common/scale.h b/av1/common/scale.h
index e69d3381d1164bcbdd3ff3a561f3b86d3a9fecc9..29df9b6989113458325f37038020fbe414d7df5d 100644
--- a/av1/common/scale.h
+++ b/av1/common/scale.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_SCALE_H_
-#define VP10_COMMON_SCALE_H_
+#ifndef AV1_COMMON_SCALE_H_
+#define AV1_COMMON_SCALE_H_
 
 #include "av1/common/mv.h"
 #include "aom_dsp/aom_convolve.h"
@@ -33,29 +33,29 @@ struct scale_factors {
   int (*scale_value_y)(int val, const struct scale_factors *sf);
 
   convolve_fn_t predict[2][2][2];  // horiz, vert, avg
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_convolve_fn_t highbd_predict[2][2][2];  // horiz, vert, avg
 #endif
 };
 
-MV32 vp10_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
+MV32 av1_scale_mv(const MV *mv, int x, int y, const struct scale_factors *sf);
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
                                         int other_h, int this_w, int this_h,
                                         int use_high);
 #else
-void vp10_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
+void av1_setup_scale_factors_for_frame(struct scale_factors *sf, int other_w,
                                         int other_h, int this_w, int this_h);
 #endif
 
-static INLINE int vp10_is_valid_scale(const struct scale_factors *sf) {
+static INLINE int av1_is_valid_scale(const struct scale_factors *sf) {
   return sf->x_scale_fp != REF_INVALID_SCALE &&
          sf->y_scale_fp != REF_INVALID_SCALE;
 }
 
-static INLINE int vp10_is_scaled(const struct scale_factors *sf) {
-  return vp10_is_valid_scale(sf) &&
+static INLINE int av1_is_scaled(const struct scale_factors *sf) {
+  return av1_is_valid_scale(sf) &&
          (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
 }
 
@@ -69,4 +69,4 @@ static INLINE int valid_ref_frame_size(int ref_width, int ref_height,
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SCALE_H_
+#endif  // AV1_COMMON_SCALE_H_
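
The scale.c/scale.h hunks keep the fixed-point reference-scaling checks intact under the new names: get_fixed_point_scale_factor() returns (other_size << REF_SCALE_SHIFT) / this_size, av1_is_valid_scale() rejects REF_INVALID_SCALE factors, and av1_is_scaled() additionally requires at least one factor to differ from the 1:1 value. A stand-alone sketch of those checks; the REF_* constant values below are stand-ins chosen for illustration, not taken from the tree.

#include <stdio.h>

#define REF_SCALE_SHIFT 14 /* assumed fixed-point precision, illustrative */
#define REF_NO_SCALE (1 << REF_SCALE_SHIFT)
#define REF_INVALID_SCALE (-1)

struct scale_factors {
  int x_scale_fp; /* horizontal fixed-point scale */
  int y_scale_fp; /* vertical fixed-point scale */
};

/* Mirrors get_fixed_point_scale_factor() from the hunk above. */
static int fixed_point_scale(int other_size, int this_size) {
  return (other_size << REF_SCALE_SHIFT) / this_size;
}

static int is_valid_scale(const struct scale_factors *sf) {
  return sf->x_scale_fp != REF_INVALID_SCALE &&
         sf->y_scale_fp != REF_INVALID_SCALE;
}

static int is_scaled(const struct scale_factors *sf) {
  return is_valid_scale(sf) &&
         (sf->x_scale_fp != REF_NO_SCALE || sf->y_scale_fp != REF_NO_SCALE);
}

int main(void) {
  /* A 1920x1080 reference predicting a 1280x720 frame: 1.5x in each axis. */
  struct scale_factors sf = { fixed_point_scale(1920, 1280),
                              fixed_point_scale(1080, 720) };
  printf("valid=%d scaled=%d\n", is_valid_scale(&sf), is_scaled(&sf));
  return 0;
}
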
diff --git a/av1/common/scan.c b/av1/common/scan.c
index cddc5575caee02784766992b8e521cde76aa2364..aac80d90feffcace23c186e16257b85044afbce9 100644
--- a/av1/common/scan.c
+++ b/av1/common/scan.c
@@ -514,40 +514,40 @@ DECLARE_ALIGNED(16, static const int16_t,
   959, 990,  991, 1022, 0,   0,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_4x4[16]) = {
   0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_4x4[16]) = {
   0, 3, 7, 11, 1, 5, 9, 12, 2, 6, 10, 14, 4, 8, 13, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_4x4[16]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_4x4[16]) = {
   0, 1, 3, 5, 2, 4, 6, 9, 7, 8, 11, 13, 10, 12, 14, 15,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_8x8[64]) = {
   0,  3,  8,  15, 22, 32, 40, 47, 1,  5,  11, 18, 26, 34, 44, 51,
   2,  7,  13, 20, 28, 38, 46, 54, 4,  10, 16, 24, 31, 41, 50, 56,
   6,  12, 21, 27, 35, 43, 52, 58, 9,  17, 25, 33, 39, 48, 55, 60,
   14, 23, 30, 37, 45, 53, 59, 62, 19, 29, 36, 42, 49, 57, 61, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_8x8[64]) = {
   0,  1,  2,  5,  8,  12, 19, 24, 3,  4,  7,  10, 15, 20, 30, 39,
   6,  9,  13, 16, 21, 27, 37, 46, 11, 14, 17, 23, 28, 34, 44, 52,
   18, 22, 25, 31, 35, 41, 50, 57, 26, 29, 33, 38, 43, 49, 55, 59,
   32, 36, 42, 47, 51, 54, 60, 61, 40, 45, 48, 53, 56, 58, 62, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_8x8[64]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_8x8[64]) = {
   0,  2,  5,  9,  14, 22, 31, 37, 1,  4,  8,  13, 19, 26, 38, 44,
   3,  6,  10, 17, 24, 30, 42, 49, 7,  11, 15, 21, 29, 36, 47, 53,
   12, 16, 20, 27, 34, 43, 52, 57, 18, 23, 28, 35, 41, 48, 56, 60,
   25, 32, 39, 45, 50, 55, 59, 62, 33, 40, 46, 51, 54, 58, 61, 63,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_col_iscan_16x16[256]) = {
   0,  4,  11,  20,  31,  43,  59,  75,  85,  109, 130, 150, 165, 181, 195, 198,
   1,  6,  14,  23,  34,  47,  64,  81,  95,  114, 135, 153, 171, 188, 201, 212,
   2,  8,  16,  25,  38,  52,  67,  83,  101, 116, 136, 157, 172, 190, 205, 216,
@@ -566,7 +566,7 @@ DECLARE_ALIGNED(16, static const int16_t, vp10_col_iscan_16x16[256]) = {
   65, 88, 107, 124, 139, 152, 163, 177, 185, 199, 221, 234, 243, 248, 252, 255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_row_iscan_16x16[256]) = {
   0,   1,   2,   4,   6,   9,   12,  17,  22,  29,  36,  43,  54,  64,  76,
   86,  3,   5,   7,   11,  15,  19,  25,  32,  38,  48,  59,  68,  84,  99,
   115, 130, 8,   10,  13,  18,  23,  27,  33,  42,  51,  60,  72,  88,  103,
@@ -587,7 +587,7 @@ DECLARE_ALIGNED(16, static const int16_t, vp10_row_iscan_16x16[256]) = {
   255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_16x16[256]) = {
   0,   2,   5,   9,   17,  24,  36,  44,  55,  72,  88,  104, 128, 143, 166,
   179, 1,   4,   8,   13,  20,  30,  40,  54,  66,  79,  96,  113, 141, 154,
   178, 196, 3,   7,   11,  18,  25,  33,  46,  57,  71,  86,  101, 119, 148,
@@ -608,7 +608,7 @@ DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_16x16[256]) = {
   255,
 };
 
-DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
+DECLARE_ALIGNED(16, static const int16_t, av1_default_iscan_32x32[1024]) = {
   0,    2,    5,    10,   17,   25,   38,   47,   62,   83,   101,  121,  145,
   170,  193,  204,  210,  219,  229,  233,  245,  257,  275,  299,  342,  356,
   377,  405,  455,  471,  495,  527,  1,    4,    8,    15,   22,   30,   45,
@@ -690,40 +690,40 @@ DECLARE_ALIGNED(16, static const int16_t, vp10_default_iscan_32x32[1024]) = {
   967,  973,  988,  996,  1002, 1006, 1014, 1018, 1021, 1023,
 };
 
-const scan_order vp10_default_scan_orders[TX_SIZES] = {
-  { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-  { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-  { default_scan_16x16, vp10_default_iscan_16x16,
+const scan_order av1_default_scan_orders[TX_SIZES] = {
+  { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+  { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+  { default_scan_16x16, av1_default_iscan_16x16,
     default_scan_16x16_neighbors },
-  { default_scan_32x32, vp10_default_iscan_32x32,
+  { default_scan_32x32, av1_default_iscan_32x32,
     default_scan_32x32_neighbors },
 };
 
-const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES] = {
+const scan_order av1_scan_orders[TX_SIZES][TX_TYPES] = {
   { // TX_4X4
-    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors },
-    { row_scan_4x4, vp10_row_iscan_4x4, row_scan_4x4_neighbors },
-    { col_scan_4x4, vp10_col_iscan_4x4, col_scan_4x4_neighbors },
-    { default_scan_4x4, vp10_default_iscan_4x4, default_scan_4x4_neighbors } },
+    { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors },
+    { row_scan_4x4, av1_row_iscan_4x4, row_scan_4x4_neighbors },
+    { col_scan_4x4, av1_col_iscan_4x4, col_scan_4x4_neighbors },
+    { default_scan_4x4, av1_default_iscan_4x4, default_scan_4x4_neighbors } },
   { // TX_8X8
-    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors },
-    { row_scan_8x8, vp10_row_iscan_8x8, row_scan_8x8_neighbors },
-    { col_scan_8x8, vp10_col_iscan_8x8, col_scan_8x8_neighbors },
-    { default_scan_8x8, vp10_default_iscan_8x8, default_scan_8x8_neighbors } },
+    { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors },
+    { row_scan_8x8, av1_row_iscan_8x8, row_scan_8x8_neighbors },
+    { col_scan_8x8, av1_col_iscan_8x8, col_scan_8x8_neighbors },
+    { default_scan_8x8, av1_default_iscan_8x8, default_scan_8x8_neighbors } },
   { // TX_16X16
-    { default_scan_16x16, vp10_default_iscan_16x16,
+    { default_scan_16x16, av1_default_iscan_16x16,
       default_scan_16x16_neighbors },
-    { row_scan_16x16, vp10_row_iscan_16x16, row_scan_16x16_neighbors },
-    { col_scan_16x16, vp10_col_iscan_16x16, col_scan_16x16_neighbors },
-    { default_scan_16x16, vp10_default_iscan_16x16,
+    { row_scan_16x16, av1_row_iscan_16x16, row_scan_16x16_neighbors },
+    { col_scan_16x16, av1_col_iscan_16x16, col_scan_16x16_neighbors },
+    { default_scan_16x16, av1_default_iscan_16x16,
       default_scan_16x16_neighbors } },
   { // TX_32X32
-    { default_scan_32x32, vp10_default_iscan_32x32,
+    { default_scan_32x32, av1_default_iscan_32x32,
       default_scan_32x32_neighbors },
-    { default_scan_32x32, vp10_default_iscan_32x32,
+    { default_scan_32x32, av1_default_iscan_32x32,
       default_scan_32x32_neighbors },
-    { default_scan_32x32, vp10_default_iscan_32x32,
+    { default_scan_32x32, av1_default_iscan_32x32,
       default_scan_32x32_neighbors },
-    { default_scan_32x32, vp10_default_iscan_32x32,
+    { default_scan_32x32, av1_default_iscan_32x32,
       default_scan_32x32_neighbors } },
 };
diff --git a/av1/common/scan.h b/av1/common/scan.h
index 22d21b192142d5b2379566a158b8e323a9309a63..27cb99da8cfdce91c0eccad46725a1a98e644ea9 100644
--- a/av1/common/scan.h
+++ b/av1/common/scan.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_SCAN_H_
-#define VP10_COMMON_SCAN_H_
+#ifndef AV1_COMMON_SCAN_H_
+#define AV1_COMMON_SCAN_H_
 
 #include "aom/aom_integer.h"
 #include "aom_ports/mem.h"
@@ -30,8 +30,8 @@ typedef struct {
   const int16_t *neighbors;
 } scan_order;
 
-extern const scan_order vp10_default_scan_orders[TX_SIZES];
-extern const scan_order vp10_scan_orders[TX_SIZES][TX_TYPES];
+extern const scan_order av1_default_scan_orders[TX_SIZES];
+extern const scan_order av1_scan_orders[TX_SIZES][TX_TYPES];
 
 static INLINE int get_coef_context(const int16_t *neighbors,
                                    const uint8_t *token_cache, int c) {
@@ -41,11 +41,11 @@ static INLINE int get_coef_context(const int16_t *neighbors,
 }
 
 static INLINE const scan_order *get_scan(TX_SIZE tx_size, TX_TYPE tx_type) {
-  return &vp10_scan_orders[tx_size][tx_type];
+  return &av1_scan_orders[tx_size][tx_type];
 }
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SCAN_H_
+#endif  // AV1_COMMON_SCAN_H_
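
The renamed tables in scan.c are inverse scans: av1_default_iscan_4x4[raster_position] gives that coefficient's position in scan order, and get_scan() in scan.h simply selects the (scan, iscan, neighbors) triple for a transform size and type. Reversing the 4x4 table shown above recovers the forward scan order; the short read-back check below is an illustration, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Copied from the av1_default_iscan_4x4 hunk above. */
static const int16_t default_iscan_4x4[16] = {
  0, 2, 5, 8, 1, 3, 9, 12, 4, 7, 11, 14, 6, 10, 13, 15,
};

int main(void) {
  int16_t scan[16];
  /* iscan[raster] == scan index, so inverting gives scan[index] == raster. */
  for (int raster = 0; raster < 16; ++raster)
    scan[default_iscan_4x4[raster]] = (int16_t)raster;
  for (int i = 0; i < 16; ++i) printf("%d ", scan[i]);
  printf("\n"); /* prints 0 4 1 5 8 2 12 9 3 6 13 10 7 14 11 15 */
  return 0;
}
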
diff --git a/av1/common/seg_common.c b/av1/common/seg_common.c
index c7f428b12bada9d9da05b4053b3d17c9832d6353..369a3e1d5161214cb89ded22567cab4387f8f2f1 100644
--- a/av1/common/seg_common.c
+++ b/av1/common/seg_common.c
@@ -26,25 +26,25 @@ static const int seg_feature_data_max[SEG_LVL_MAX] = { MAXQ, MAX_LOOP_FILTER, 3,
 // the coding mechanism is still subject to change so these provide a
 // convenient single point of change.
 
-void vp10_clearall_segfeatures(struct segmentation *seg) {
-  vp10_zero(seg->feature_data);
-  vp10_zero(seg->feature_mask);
+void av1_clearall_segfeatures(struct segmentation *seg) {
+  av1_zero(seg->feature_data);
+  av1_zero(seg->feature_mask);
 }
 
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
                             SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] |= 1 << feature_id;
 }
 
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_max[feature_id];
 }
 
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id) {
   return seg_feature_data_signed[feature_id];
 }
 
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
+void av1_set_segdata(struct segmentation *seg, int segment_id,
                       SEG_LVL_FEATURES feature_id, int seg_data) {
   assert(seg_data <= seg_feature_data_max[feature_id]);
   if (seg_data < 0) {
@@ -55,7 +55,7 @@ void vp10_set_segdata(struct segmentation *seg, int segment_id,
   seg->feature_data[segment_id][feature_id] = seg_data;
 }
 
-const aom_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
+const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)] = {
   2, 4, 6, 8, 10, 12, 0, -1, -2, -3, -4, -5, -6, -7
 };
 
diff --git a/av1/common/seg_common.h b/av1/common/seg_common.h
index 9a0f2c2486bc6b6fc53693b0ff19039afb5ad914..eda022f7b588ce971fad9e5594279da7cb6c5943 100644
--- a/av1/common/seg_common.h
+++ b/av1/common/seg_common.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_SEG_COMMON_H_
-#define VP10_COMMON_SEG_COMMON_H_
+#ifndef AV1_COMMON_SEG_COMMON_H_
+#define AV1_COMMON_SEG_COMMON_H_
 
 #include "aom_dsp/prob.h"
 
@@ -57,16 +57,16 @@ static INLINE int segfeature_active(const struct segmentation *seg,
   return seg->enabled && (seg->feature_mask[segment_id] & (1 << feature_id));
 }
 
-void vp10_clearall_segfeatures(struct segmentation *seg);
+void av1_clearall_segfeatures(struct segmentation *seg);
 
-void vp10_enable_segfeature(struct segmentation *seg, int segment_id,
+void av1_enable_segfeature(struct segmentation *seg, int segment_id,
                             SEG_LVL_FEATURES feature_id);
 
-int vp10_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
+int av1_seg_feature_data_max(SEG_LVL_FEATURES feature_id);
 
-int vp10_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
+int av1_is_segfeature_signed(SEG_LVL_FEATURES feature_id);
 
-void vp10_set_segdata(struct segmentation *seg, int segment_id,
+void av1_set_segdata(struct segmentation *seg, int segment_id,
                       SEG_LVL_FEATURES feature_id, int seg_data);
 
 static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
@@ -74,10 +74,10 @@ static INLINE int get_segdata(const struct segmentation *seg, int segment_id,
   return seg->feature_data[segment_id][feature_id];
 }
 
-extern const aom_tree_index vp10_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
+extern const aom_tree_index av1_segment_tree[TREE_SIZE(MAX_SEGMENTS)];
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_SEG_COMMON_H_
+#endif  // AV1_COMMON_SEG_COMMON_H_
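
The seg_common hunks rename a small bitmask API: av1_enable_segfeature() sets a per-segment feature bit, segfeature_active() tests that bit together with seg->enabled, and av1_set_segdata() stores the range-checked feature value. A stand-alone sketch of how the pieces fit together; the MAX_SEGMENTS and SEG_LVL_MAX values here are illustrative.

#include <stdio.h>

#define MAX_SEGMENTS 8 /* illustrative */
#define SEG_LVL_MAX 4  /* illustrative */

struct segmentation {
  int enabled;
  int feature_data[MAX_SEGMENTS][SEG_LVL_MAX];
  unsigned feature_mask[MAX_SEGMENTS];
};

static void enable_segfeature(struct segmentation *seg, int segment_id,
                              int feature_id) {
  seg->feature_mask[segment_id] |= 1u << feature_id;
}

static int segfeature_active(const struct segmentation *seg, int segment_id,
                             int feature_id) {
  return seg->enabled &&
         (seg->feature_mask[segment_id] & (1u << feature_id));
}

static void set_segdata(struct segmentation *seg, int segment_id,
                        int feature_id, int seg_data) {
  seg->feature_data[segment_id][feature_id] = seg_data;
}

int main(void) {
  struct segmentation seg = { 0 };
  seg.enabled = 1;
  enable_segfeature(&seg, 2, 0); /* e.g. a per-segment delta on feature 0 */
  set_segdata(&seg, 2, 0, -8);
  printf("segment 2 active=%d data=%d\n", segfeature_active(&seg, 2, 0),
         seg.feature_data[2][0]);
  printf("segment 0 active=%d\n", segfeature_active(&seg, 0, 0));
  return 0;
}
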
diff --git a/av1/common/thread_common.c b/av1/common/thread_common.c
index 502ba50a6d6fa508ab4831f5a029b1aa9f05794e..f068cf4237d23a48e699545014682182c5322dbf 100644
--- a/av1/common/thread_common.c
+++ b/av1/common/thread_common.c
@@ -34,7 +34,7 @@ static INLINE void mutex_lock(pthread_mutex_t *const mutex) {
 }
 #endif  // CONFIG_MULTITHREAD
 
-static INLINE void sync_read(VP10LfSync *const lf_sync, int r, int c) {
+static INLINE void sync_read(AV1LfSync *const lf_sync, int r, int c) {
 #if CONFIG_MULTITHREAD
   const int nsync = lf_sync->sync_range;
 
@@ -54,7 +54,7 @@ static INLINE void sync_read(VP10LfSync *const lf_sync, int r, int c) {
 #endif  // CONFIG_MULTITHREAD
 }
 
-static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
+static INLINE void sync_write(AV1LfSync *const lf_sync, int r, int c,
                               const int sb_cols) {
 #if CONFIG_MULTITHREAD
   const int nsync = lf_sync->sync_range;
@@ -87,9 +87,9 @@ static INLINE void sync_write(VP10LfSync *const lf_sync, int r, int c,
 
 // Implement row loopfiltering for each thread.
 static INLINE void thread_loop_filter_rows(
-    const YV12_BUFFER_CONFIG *const frame_buffer, VP10_COMMON *const cm,
+    const YV12_BUFFER_CONFIG *const frame_buffer, AV1_COMMON *const cm,
     struct macroblockd_plane planes[MAX_MB_PLANE], int start, int stop,
-    int y_only, VP10LfSync *const lf_sync) {
+    int y_only, AV1LfSync *const lf_sync) {
   const int num_planes = y_only ? 1 : MAX_MB_PLANE;
   const int sb_cols = mi_cols_aligned_to_sb(cm->mi_cols) >> MI_BLOCK_SIZE_LOG2;
   int mi_row, mi_col;
@@ -115,22 +115,22 @@ static INLINE void thread_loop_filter_rows(
 
       sync_read(lf_sync, r, c);
 
-      vp10_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
+      av1_setup_dst_planes(planes, frame_buffer, mi_row, mi_col);
 
       // TODO(JBB): Make setup_mask work for non 420.
-      vp10_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
+      av1_setup_mask(cm, mi_row, mi_col, mi + mi_col, cm->mi_stride, &lfm);
 
-      vp10_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
+      av1_filter_block_plane_ss00(cm, &planes[0], mi_row, &lfm);
       for (plane = 1; plane < num_planes; ++plane) {
         switch (path) {
           case LF_PATH_420:
-            vp10_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss11(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_444:
-            vp10_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
+            av1_filter_block_plane_ss00(cm, &planes[plane], mi_row, &lfm);
             break;
           case LF_PATH_SLOW:
-            vp10_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
+            av1_filter_block_plane_non420(cm, &planes[plane], mi + mi_col,
                                            mi_row, mi_col);
             break;
         }
@@ -142,7 +142,7 @@ static INLINE void thread_loop_filter_rows(
 }
 
 // Row-based multi-threaded loopfilter hook
-static int loop_filter_row_worker(VP10LfSync *const lf_sync,
+static int loop_filter_row_worker(AV1LfSync *const lf_sync,
                                   LFWorkerData *const lf_data) {
   thread_loop_filter_rows(lf_data->frame_buffer, lf_data->cm, lf_data->planes,
                           lf_data->start, lf_data->stop, lf_data->y_only,
@@ -150,11 +150,11 @@ static int loop_filter_row_worker(VP10LfSync *const lf_sync,
   return 1;
 }
 
-static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                                 struct macroblockd_plane planes[MAX_MB_PLANE],
                                 int start, int stop, int y_only,
                                 VPxWorker *workers, int nworkers,
-                                VP10LfSync *lf_sync) {
+                                AV1LfSync *lf_sync) {
   const VPxWorkerInterface *const winterface = aom_get_worker_interface();
   // Number of superblock rows and cols
   const int sb_rows = mi_cols_aligned_to_sb(cm->mi_rows) >> MI_BLOCK_SIZE_LOG2;
@@ -166,8 +166,8 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
 
   if (!lf_sync->sync_range || sb_rows != lf_sync->rows ||
       num_workers > lf_sync->num_workers) {
-    vp10_loop_filter_dealloc(lf_sync);
-    vp10_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
+    av1_loop_filter_dealloc(lf_sync);
+    av1_loop_filter_alloc(lf_sync, cm, sb_rows, cm->width, num_workers);
   }
 
   // Initialize cur_sb_col to -1 for all SB rows.
@@ -190,7 +190,7 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
     worker->data2 = lf_data;
 
     // Loopfilter data
-    vp10_loop_filter_data_reset(lf_data, frame, cm, planes);
+    av1_loop_filter_data_reset(lf_data, frame, cm, planes);
     lf_data->start = start + i * MI_BLOCK_SIZE;
     lf_data->stop = stop;
     lf_data->y_only = y_only;
@@ -209,11 +209,11 @@ static void loop_filter_rows_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
   }
 }
 
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, AV1_COMMON *cm,
                                struct macroblockd_plane planes[MAX_MB_PLANE],
                                int frame_filter_level, int y_only,
                                int partial_frame, VPxWorker *workers,
-                               int num_workers, VP10LfSync *lf_sync) {
+                               int num_workers, AV1LfSync *lf_sync) {
   int start_mi_row, end_mi_row, mi_rows_to_filter;
 
   if (!frame_filter_level) return;
@@ -226,7 +226,7 @@ void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, VP10_COMMON *cm,
     mi_rows_to_filter = VPXMAX(cm->mi_rows / 8, 8);
   }
   end_mi_row = start_mi_row + mi_rows_to_filter;
-  vp10_loop_filter_frame_init(cm, frame_filter_level);
+  av1_loop_filter_frame_init(cm, frame_filter_level);
 
   loop_filter_rows_mt(frame, cm, planes, start_mi_row, end_mi_row, y_only,
                       workers, num_workers, lf_sync);
@@ -247,7 +247,7 @@ static INLINE int get_sync_range(int width) {
 }
 
 // Allocate memory for lf row synchronization
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, AV1_COMMON *cm, int rows,
                             int width, int num_workers) {
   lf_sync->rows = rows;
 #if CONFIG_MULTITHREAD
@@ -284,7 +284,7 @@ void vp10_loop_filter_alloc(VP10LfSync *lf_sync, VP10_COMMON *cm, int rows,
 }
 
 // Deallocate lf synchronization related mutex and data
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync) {
   if (lf_sync != NULL) {
 #if CONFIG_MULTITHREAD
     int i;
@@ -306,12 +306,12 @@ void vp10_loop_filter_dealloc(VP10LfSync *lf_sync) {
     aom_free(lf_sync->cur_sb_col);
     // clear the structure as the source of this call may be a resize in which
     // case this call will be followed by an _alloc() which may fail.
-    vp10_zero(*lf_sync);
+    av1_zero(*lf_sync);
   }
 }
 
 // Accumulate frame counts.
-void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
+void av1_accumulate_frame_counts(AV1_COMMON *cm, FRAME_COUNTS *counts,
                                   int is_dec) {
   int i, j, k, l, m;
 
diff --git a/av1/common/thread_common.h b/av1/common/thread_common.h
index c514865c46169db7e61359eaa68b288c0c525143..5b0dfc0fb78ebdcafe8b02a81027c922ef32580b 100644
--- a/av1/common/thread_common.h
+++ b/av1/common/thread_common.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_LOOPFILTER_THREAD_H_
-#define VP10_COMMON_LOOPFILTER_THREAD_H_
+#ifndef AV1_COMMON_LOOPFILTER_THREAD_H_
+#define AV1_COMMON_LOOPFILTER_THREAD_H_
 #include "./aom_config.h"
 #include "av1/common/loopfilter.h"
 #include "aom_util/aom_thread.h"
@@ -19,11 +19,11 @@
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 struct FRAME_COUNTS;
 
 // Loopfilter row synchronization
-typedef struct VP10LfSyncData {
+typedef struct AV1LfSyncData {
 #if CONFIG_MULTITHREAD
   pthread_mutex_t *mutex_;
   pthread_cond_t *cond_;
@@ -38,27 +38,27 @@ typedef struct VP10LfSyncData {
   // Row-based parallel loopfilter data
   LFWorkerData *lfdata;
   int num_workers;
-} VP10LfSync;
+} AV1LfSync;
 
 // Allocate memory for loopfilter row synchronization.
-void vp10_loop_filter_alloc(VP10LfSync *lf_sync, struct VP10Common *cm,
+void av1_loop_filter_alloc(AV1LfSync *lf_sync, struct AV1Common *cm,
                             int rows, int width, int num_workers);
 
 // Deallocate loopfilter synchronization related mutex and data.
-void vp10_loop_filter_dealloc(VP10LfSync *lf_sync);
+void av1_loop_filter_dealloc(AV1LfSync *lf_sync);
 
 // Multi-threaded loopfilter that uses the tile threads.
-void vp10_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct VP10Common *cm,
+void av1_loop_filter_frame_mt(YV12_BUFFER_CONFIG *frame, struct AV1Common *cm,
                                struct macroblockd_plane planes[MAX_MB_PLANE],
                                int frame_filter_level, int y_only,
                                int partial_frame, VPxWorker *workers,
-                               int num_workers, VP10LfSync *lf_sync);
+                               int num_workers, AV1LfSync *lf_sync);
 
-void vp10_accumulate_frame_counts(struct VP10Common *cm,
+void av1_accumulate_frame_counts(struct AV1Common *cm,
                                   struct FRAME_COUNTS *counts, int is_dec);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_LOOPFILTER_THREAD_H_
+#endif  // AV1_COMMON_LOOPFILTER_THREAD_H_
diff --git a/av1/common/tile_common.c b/av1/common/tile_common.c
index e0a0572825b906ff6e34f8d67d57cbdcf12a2f0f..ebe6d67748ed2cb6238aa8369ea5cfbd686faca7 100644
--- a/av1/common/tile_common.c
+++ b/av1/common/tile_common.c
@@ -22,19 +22,19 @@ static int get_tile_offset(int idx, int mis, int log2) {
   return VPXMIN(offset, mis);
 }
 
-void vp10_tile_set_row(TileInfo *tile, const VP10_COMMON *cm, int row) {
+void av1_tile_set_row(TileInfo *tile, const AV1_COMMON *cm, int row) {
   tile->mi_row_start = get_tile_offset(row, cm->mi_rows, cm->log2_tile_rows);
   tile->mi_row_end = get_tile_offset(row + 1, cm->mi_rows, cm->log2_tile_rows);
 }
 
-void vp10_tile_set_col(TileInfo *tile, const VP10_COMMON *cm, int col) {
+void av1_tile_set_col(TileInfo *tile, const AV1_COMMON *cm, int col) {
   tile->mi_col_start = get_tile_offset(col, cm->mi_cols, cm->log2_tile_cols);
   tile->mi_col_end = get_tile_offset(col + 1, cm->mi_cols, cm->log2_tile_cols);
 }
 
-void vp10_tile_init(TileInfo *tile, const VP10_COMMON *cm, int row, int col) {
-  vp10_tile_set_row(tile, cm, row);
-  vp10_tile_set_col(tile, cm, col);
+void av1_tile_init(TileInfo *tile, const AV1_COMMON *cm, int row, int col) {
+  av1_tile_set_row(tile, cm, row);
+  av1_tile_set_col(tile, cm, col);
 }
 
 static int get_min_log2_tile_cols(const int sb64_cols) {
@@ -49,7 +49,7 @@ static int get_max_log2_tile_cols(const int sb64_cols) {
   return max_log2 - 1;
 }
 
-void vp10_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
+void av1_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
                           int *max_log2_tile_cols) {
   const int sb64_cols = mi_cols_aligned_to_sb(mi_cols) >> MI_BLOCK_SIZE_LOG2;
   *min_log2_tile_cols = get_min_log2_tile_cols(sb64_cols);
diff --git a/av1/common/tile_common.h b/av1/common/tile_common.h
index ca77ecc78e0352160bb6d383fd6aa5a828ef18a9..23a7ce00ed2e8db5f7f9b6fc46366c0065c8a538 100644
--- a/av1/common/tile_common.h
+++ b/av1/common/tile_common.h
@@ -9,14 +9,14 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_COMMON_TILE_COMMON_H_
-#define VP10_COMMON_TILE_COMMON_H_
+#ifndef AV1_COMMON_TILE_COMMON_H_
+#define AV1_COMMON_TILE_COMMON_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Common;
+struct AV1Common;
 
 typedef struct TileInfo {
   int mi_row_start, mi_row_end;
@@ -25,17 +25,17 @@ typedef struct TileInfo {
 
 // initializes 'tile->mi_(row|col)_(start|end)' for (row, col) based on
 // 'cm->log2_tile_(rows|cols)' & 'cm->mi_(rows|cols)'
-void vp10_tile_init(TileInfo *tile, const struct VP10Common *cm, int row,
+void av1_tile_init(TileInfo *tile, const struct AV1Common *cm, int row,
                     int col);
 
-void vp10_tile_set_row(TileInfo *tile, const struct VP10Common *cm, int row);
-void vp10_tile_set_col(TileInfo *tile, const struct VP10Common *cm, int col);
+void av1_tile_set_row(TileInfo *tile, const struct AV1Common *cm, int row);
+void av1_tile_set_col(TileInfo *tile, const struct AV1Common *cm, int col);
 
-void vp10_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
+void av1_get_tile_n_bits(int mi_cols, int *min_log2_tile_cols,
                           int *max_log2_tile_cols);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_COMMON_TILE_COMMON_H_
+#endif  // AV1_COMMON_TILE_COMMON_H_
diff --git a/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
index b6c38aa33807323b8ad0f8b2930c3a105e916a99..876e57973f6b88d2e648ac64d392499c128b7ed2 100644
--- a/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
+++ b/av1/common/x86/av1_fwd_dct32x32_impl_sse2.h
@@ -23,31 +23,31 @@
 #define ADD_EPI16 _mm_adds_epi16
 #define SUB_EPI16 _mm_subs_epi16
 #if FDCT32x32_HIGH_PRECISION
-void vp10_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vp10_fdct32(temp_in, temp_out, 0);
+    av1_fdct32(temp_in, temp_out, 0);
     for (j = 0; j < 32; ++j)
       out[j + i * 32] =
           (tran_low_t)((temp_out[j] + 1 + (temp_out[j] < 0)) >> 2);
   }
 }
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rows_c
 #else
-void vp10_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
+void av1_fdct32x32_rd_rows_c(const int16_t *intermediate, tran_low_t *out) {
   int i, j;
   for (i = 0; i < 32; ++i) {
     tran_high_t temp_in[32], temp_out[32];
     for (j = 0; j < 32; ++j) temp_in[j] = intermediate[j * 32 + i];
-    vp10_fdct32(temp_in, temp_out, 1);
+    av1_fdct32(temp_in, temp_out, 1);
     for (j = 0; j < 32; ++j) out[j + i * 32] = (tran_low_t)temp_out[j];
   }
 }
-#define HIGH_FDCT32x32_2D_C vp10_highbd_fdct32x32_rd_c
-#define HIGH_FDCT32x32_2D_ROWS_C vp10_fdct32x32_rd_rows_c
+#define HIGH_FDCT32x32_2D_C av1_highbd_fdct32x32_rd_c
+#define HIGH_FDCT32x32_2D_ROWS_C av1_fdct32x32_rd_rows_c
 #endif  // FDCT32x32_HIGH_PRECISION
 #else
 #define ADD_EPI16 _mm_add_epi16
diff --git a/av1/common/x86/av1_fwd_txfm_sse2.c b/av1/common/x86/av1_fwd_txfm_sse2.c
index ee78f898a08d8212db7e30d79ac263a3f79787a0..c4d0b0c03a5a980c8d4e0e5b84ce4df6f95052de 100644
--- a/av1/common/x86/av1_fwd_txfm_sse2.c
+++ b/av1/common/x86/av1_fwd_txfm_sse2.c
@@ -15,7 +15,7 @@
 #include "aom_dsp/aom_dsp_common.h"
 #include "aom_dsp/x86/fwd_txfm_sse2.h"
 
-void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0, in1;
   __m128i tmp;
   const __m128i zero = _mm_setzero_si128();
@@ -44,7 +44,7 @@ void vp10_fdct4x4_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   store_output(&in0, output);
 }
 
-void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));
   __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));
   __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));
@@ -84,7 +84,7 @@ void vp10_fdct8x8_1_sse2(const int16_t *input, tran_low_t *output, int stride) {
   store_output(&in1, output);
 }
 
-void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
+void av1_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
                            int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
@@ -153,7 +153,7 @@ void vp10_fdct16x16_1_sse2(const int16_t *input, tran_low_t *output,
   store_output(&in1, output);
 }
 
-void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
+void av1_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
                            int stride) {
   __m128i in0, in1, in2, in3;
   __m128i u0, u1;
@@ -226,47 +226,47 @@ void vp10_fdct32x32_1_sse2(const int16_t *input, tran_low_t *output,
 }
 
 #define DCT_HIGH_BIT_DEPTH 0
-#define FDCT4x4_2D vp10_fdct4x4_sse2
-#define FDCT8x8_2D vp10_fdct8x8_sse2
-#define FDCT16x16_2D vp10_fdct16x16_sse2
+#define FDCT4x4_2D av1_fdct4x4_sse2
+#define FDCT8x8_2D av1_fdct8x8_sse2
+#define FDCT16x16_2D av1_fdct16x16_sse2
 #include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vp10_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vp10_fdct32x32_sse2
+#define FDCT32x32_2D av1_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define DCT_HIGH_BIT_DEPTH 1
-#define FDCT4x4_2D vp10_highbd_fdct4x4_sse2
-#define FDCT8x8_2D vp10_highbd_fdct8x8_sse2
-#define FDCT16x16_2D vp10_highbd_fdct16x16_sse2
+#define FDCT4x4_2D av1_highbd_fdct4x4_sse2
+#define FDCT8x8_2D av1_highbd_fdct8x8_sse2
+#define FDCT16x16_2D av1_highbd_fdct16x16_sse2
 #include "av1/common/x86/av1_fwd_txfm_impl_sse2.h"  // NOLINT
 #undef FDCT4x4_2D
 #undef FDCT8x8_2D
 #undef FDCT16x16_2D
 
-#define FDCT32x32_2D vp10_highbd_fdct32x32_rd_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_rd_sse2
 #define FDCT32x32_HIGH_PRECISION 0
 #include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 
-#define FDCT32x32_2D vp10_highbd_fdct32x32_sse2
+#define FDCT32x32_2D av1_highbd_fdct32x32_sse2
 #define FDCT32x32_HIGH_PRECISION 1
 #include "av1/common/x86/av1_fwd_dct32x32_impl_sse2.h"  // NOLINT
 #undef FDCT32x32_2D
 #undef FDCT32x32_HIGH_PRECISION
 #undef DCT_HIGH_BIT_DEPTH
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
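
av1_fwd_txfm_sse2.c stamps out the low- and high-bitdepth forward transforms from shared implementation headers: it defines FDCT*_2D (plus DCT_HIGH_BIT_DEPTH and FDCT32x32_HIGH_PRECISION), includes av1_fwd_txfm_impl_sse2.h or av1_fwd_dct32x32_impl_sse2.h, then undefines the macros and repeats for the next variant. The single-file sketch below imitates that define/include/undef templating with made-up names, using self-inclusion in place of a separate implementation header.

/* Each include of the "template" at the bottom emits one function whose
 * name and behaviour are set by SCALE_FN / SCALE_SHIFT, much as the
 * FDCT*_2D defines select what the shared impl header emits. */
#ifndef TEMPLATE_PASS
#include <stdio.h>

#define TEMPLATE_PASS
#define SCALE_FN scale_by_2
#define SCALE_SHIFT 1
#include __FILE__ /* first instantiation */
#undef SCALE_FN
#undef SCALE_SHIFT

#define SCALE_FN scale_by_4
#define SCALE_SHIFT 2
#include __FILE__ /* second instantiation */
#undef SCALE_FN
#undef SCALE_SHIFT

int main(void) {
  printf("%d %d\n", scale_by_2(10), scale_by_4(10)); /* prints 20 40 */
  return 0;
}

#else /* "template" body, compiled once per instantiation */
static int SCALE_FN(int v) { return v << SCALE_SHIFT; }
#endif
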
diff --git a/av1/common/x86/av1_inv_txfm_sse2.c b/av1/common/x86/av1_inv_txfm_sse2.c
index ede75f9a7a2659b3e4abc262ed8478697e6f5767..e8c50723f4697a28ff5235f8ed6cd4be5b1f16b8 100644
--- a/av1/common/x86/av1_inv_txfm_sse2.c
+++ b/av1/common/x86/av1_inv_txfm_sse2.c
@@ -22,7 +22,7 @@
     *(int *)(dest) = _mm_cvtsi128_si32(d0);               \
   }
 
-void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i eight = _mm_set1_epi16(8);
   const __m128i cst = _mm_setr_epi16(
@@ -152,7 +152,7 @@ void vp10_idct4x4_16_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   }
 }
 
-void vp10_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct4x4_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a;
@@ -177,7 +177,7 @@ static INLINE void transpose_4x4(__m128i *res) {
   res[1] = _mm_unpackhi_epi16(tr0_0, tr0_1);
 }
 
-void vp10_idct4_sse2(__m128i *in) {
+void av1_idct4_sse2(__m128i *in) {
   const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
   const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
   const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64);
@@ -213,7 +213,7 @@ void vp10_idct4_sse2(__m128i *in) {
   in[1] = _mm_shuffle_epi32(in[1], 0x4E);
 }
 
-void vp10_iadst4_sse2(__m128i *in) {
+void av1_iadst4_sse2(__m128i *in) {
   const __m128i k__sinpi_p01_p04 = pair_set_epi16(sinpi_1_9, sinpi_4_9);
   const __m128i k__sinpi_p03_p02 = pair_set_epi16(sinpi_3_9, sinpi_2_9);
   const __m128i k__sinpi_p02_m01 = pair_set_epi16(sinpi_2_9, -sinpi_1_9);
@@ -447,7 +447,7 @@ void vp10_iadst4_sse2(__m128i *in) {
     out7 = _mm_subs_epi16(stp1_0, stp2_7);                                    \
   }
 
-void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -478,11 +478,11 @@ void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
 
   // 2-D
   for (i = 0; i < 2; i++) {
-    // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+    // 8x8 Transpose is copied from av1_fdct8x8_sse2()
     TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                   in4, in5, in6, in7);
 
-    // 4-stage 1D vp10_idct8x8
+    // 4-stage 1D av1_idct8x8
     IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4, in5,
           in6, in7);
   }
@@ -516,7 +516,7 @@ void vp10_idct8x8_64_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   RECON_AND_STORE(dest + 7 * stride, in7);
 }
 
-void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
   int a;
@@ -537,7 +537,7 @@ void vp10_idct8x8_1_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   RECON_AND_STORE(dest + 7 * stride, dc_value);
 }
 
-void vp10_idct8_sse2(__m128i *in) {
+void av1_idct8_sse2(__m128i *in) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
@@ -553,16 +553,16 @@ void vp10_idct8_sse2(__m128i *in) {
   __m128i stp2_0, stp2_1, stp2_2, stp2_3, stp2_4, stp2_5, stp2_6, stp2_7;
   __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
 
-  // 8x8 Transpose is copied from vp10_fdct8x8_sse2()
+  // 8x8 Transpose is copied from av1_fdct8x8_sse2()
   TRANSPOSE_8X8(in[0], in[1], in[2], in[3], in[4], in[5], in[6], in[7], in0,
                 in1, in2, in3, in4, in5, in6, in7);
 
-  // 4-stage 1D vp10_idct8x8
+  // 4-stage 1D av1_idct8x8
   IDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in[0], in[1], in[2], in[3],
         in[4], in[5], in[6], in[7]);
 }
 
-void vp10_iadst8_sse2(__m128i *in) {
+void av1_iadst8_sse2(__m128i *in) {
   const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
   const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
   const __m128i k__cospi_p10_p22 = pair_set_epi16(cospi_10_64, cospi_22_64);
@@ -790,7 +790,7 @@ void vp10_iadst8_sse2(__m128i *in) {
   in[7] = _mm_sub_epi16(k__const_0, s1);
 }
 
-void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
+void av1_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
   const __m128i zero = _mm_setzero_si128();
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 4);
@@ -1159,7 +1159,7 @@ void vp10_idct8x8_12_add_sse2(const int16_t *input, uint8_t *dest, int stride) {
                            stp2_12)                                            \
   }
 
-void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
                                  int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -1201,7 +1201,7 @@ void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
 
   curr1 = l;
   for (i = 0; i < 2; i++) {
-    // 1-D vp10_idct
+    // 1-D av1_idct
 
     // Load input data.
     in[0] = _mm_load_si128((const __m128i *)input);
@@ -1249,7 +1249,7 @@ void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
   }
   for (i = 0; i < 2; i++) {
     int j;
-    // 1-D vp10_idct
+    // 1-D av1_idct
     array_transpose_8x8(l + i * 8, in);
     array_transpose_8x8(r + i * 8, in + 8);
 
@@ -1284,7 +1284,7 @@ void vp10_idct16x16_256_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
                                int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -1317,7 +1317,7 @@ void vp10_idct16x16_1_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-static void vp10_iadst16_8col(__m128i *in) {
+static void av1_iadst16_8col(__m128i *in) {
   // perform 16x16 1-D ADST for 8 columns
   __m128i s[16], x[16], u[32], v[32];
   const __m128i k__cospi_p01_p31 = pair_set_epi16(cospi_1_64, cospi_31_64);
@@ -1787,7 +1787,7 @@ static void vp10_iadst16_8col(__m128i *in) {
   in[15] = _mm_sub_epi16(kZero, s[1]);
 }
 
-static void vp10_idct16_8col(__m128i *in) {
+static void av1_idct16_8col(__m128i *in) {
   const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
   const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
   const __m128i k__cospi_p14_m18 = pair_set_epi16(cospi_14_64, -cospi_18_64);
@@ -2131,19 +2131,19 @@ static void vp10_idct16_8col(__m128i *in) {
   in[15] = _mm_sub_epi16(s[0], s[15]);
 }
 
-void vp10_idct16_sse2(__m128i *in0, __m128i *in1) {
+void av1_idct16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
-  vp10_idct16_8col(in0);
-  vp10_idct16_8col(in1);
+  av1_idct16_8col(in0);
+  av1_idct16_8col(in1);
 }
 
-void vp10_iadst16_sse2(__m128i *in0, __m128i *in1) {
+void av1_iadst16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
-  vp10_iadst16_8col(in0);
-  vp10_iadst16_8col(in1);
+  av1_iadst16_8col(in0);
+  av1_iadst16_8col(in1);
 }
 
-void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
                                 int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
@@ -3017,12 +3017,12 @@ void vp10_idct16x16_10_add_sse2(const int16_t *input, uint8_t *dest,
   }
 
 // Only upper-left 8x8 has non-zero coeff
-void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
                                 int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
 
-  // vp10_idct constants for each stage
+  // av1_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
   const __m128i stg1_6 = pair_set_epi16(cospi_7_64, -cospi_25_64);
@@ -3174,13 +3174,13 @@ void vp10_idct32x32_34_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
                                   int stride) {
   const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
   const __m128i final_rounding = _mm_set1_epi16(1 << 5);
   const __m128i zero = _mm_setzero_si128();
 
-  // vp10_idct constants for each stage
+  // av1_idct constants for each stage
   const __m128i stg1_0 = pair_set_epi16(cospi_31_64, -cospi_1_64);
   const __m128i stg1_1 = pair_set_epi16(cospi_1_64, cospi_31_64);
   const __m128i stg1_2 = pair_set_epi16(cospi_15_64, -cospi_17_64);
@@ -3242,7 +3242,7 @@ void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
 
   for (i = 0; i < 4; i++) {
     i32 = (i << 5);
-    // First 1-D vp10_idct
+    // First 1-D av1_idct
     // Load input data.
     LOAD_DQCOEFF(in[0], input);
     LOAD_DQCOEFF(in[8], input);
@@ -3392,7 +3392,7 @@ void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
     col[i32 + 31] = _mm_sub_epi16(stp1_0, stp1_31);
   }
   for (i = 0; i < 4; i++) {
-    // Second 1-D vp10_idct
+    // Second 1-D av1_idct
     j = i << 3;
 
     // Transpose 32x8 block to 8x32 block
@@ -3448,7 +3448,7 @@ void vp10_idct32x32_1024_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
+void av1_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
                                int stride) {
   __m128i dc_value;
   const __m128i zero = _mm_setzero_si128();
@@ -3469,7 +3469,7 @@ void vp10_idct32x32_1_add_sse2(const int16_t *input, uint8_t *dest,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   __m128i ubounded, retval;
   const __m128i zero = _mm_set1_epi16(0);
@@ -3483,7 +3483,7 @@ static INLINE __m128i clamp_high_sse2(__m128i value, int bd) {
   return retval;
 }
 
-void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                      int stride, int bd) {
   tran_low_t out[4 * 4];
   tran_low_t *outptr = out;
@@ -3517,7 +3517,7 @@ void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
 
   if (!test) {
     // Do the row transform
-    vp10_idct4_sse2(inptr);
+    av1_idct4_sse2(inptr);
 
     // Check the min & max values
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3546,14 +3546,14 @@ void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct4_c(input, outptr, bd);
+      av1_highbd_idct4_c(input, outptr, bd);
       input += 4;
       outptr += 4;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct4_sse2(inptr);
+    av1_idct4_sse2(inptr);
 
     // Final round and shift
     inptr[0] = _mm_add_epi16(inptr[0], eight);
@@ -3589,7 +3589,7 @@ void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
     // Columns
     for (i = 0; i < 4; ++i) {
       for (j = 0; j < 4; ++j) temp_in[j] = out[j * 4 + i];
-      vp10_highbd_idct4_c(temp_in, temp_out, bd);
+      av1_highbd_idct4_c(temp_in, temp_out, bd);
       for (j = 0; j < 4; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 4), bd);
@@ -3598,7 +3598,7 @@ void vp10_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                      int stride, int bd) {
   tran_low_t out[8 * 8];
   tran_low_t *outptr = out;
@@ -3633,7 +3633,7 @@ void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
 
   if (!test) {
     // Do the row transform
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Find the min & max for the column transform
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3663,14 +3663,14 @@ void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 8; ++i) {
-      vp10_highbd_idct8_c(input, outptr, bd);
+      av1_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3689,7 +3689,7 @@ void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vp10_highbd_idct8_c(temp_in, temp_out, bd);
+      av1_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3698,7 +3698,7 @@ void vp10_highbd_idct8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                      int stride, int bd) {
   tran_low_t out[8 * 8] = { 0 };
   tran_low_t *outptr = out;
@@ -3734,7 +3734,7 @@ void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
 
   if (!test) {
     // Do the row transform
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Find the min & max for the column transform
     // N.B. Only first 4 cols contain non-zero coeffs
@@ -3766,14 +3766,14 @@ void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct8_c(input, outptr, bd);
+      av1_highbd_idct8_c(input, outptr, bd);
       input += 8;
       outptr += 8;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct8_sse2(inptr);
+    av1_idct8_sse2(inptr);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3792,7 +3792,7 @@ void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
     tran_low_t temp_in[8], temp_out[8];
     for (i = 0; i < 8; ++i) {
       for (j = 0; j < 8; ++j) temp_in[j] = out[j * 8 + i];
-      vp10_highbd_idct8_c(temp_in, temp_out, bd);
+      av1_highbd_idct8_c(temp_in, temp_out, bd);
       for (j = 0; j < 8; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 5), bd);
@@ -3801,7 +3801,7 @@ void vp10_highbd_idct8x8_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                         int stride, int bd) {
   tran_low_t out[16 * 16];
   tran_low_t *outptr = out;
@@ -3839,7 +3839,7 @@ void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
 
   if (!test) {
     // Do the row transform
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Find the min & max for the column transform
     max_input = _mm_max_epi16(inptr[0], inptr[1]);
@@ -3874,14 +3874,14 @@ void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 16; ++i) {
-      vp10_highbd_idct16_c(input, outptr, bd);
+      av1_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -3905,7 +3905,7 @@ void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vp10_highbd_idct16_c(temp_in, temp_out, bd);
+      av1_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -3914,7 +3914,7 @@ void vp10_highbd_idct16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest8,
   }
 }
 
-void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
+void av1_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
                                        int stride, int bd) {
   tran_low_t out[16 * 16] = { 0 };
   tran_low_t *outptr = out;
@@ -3954,7 +3954,7 @@ void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
 
   if (!test) {
     // Do the row transform (N.B. This transposes inptr)
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Find the min & max for the column transform
     // N.B. Only first 4 cols contain non-zero coeffs
@@ -3992,14 +3992,14 @@ void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
   } else {
     // Run the un-optimised row transform
     for (i = 0; i < 4; ++i) {
-      vp10_highbd_idct16_c(input, outptr, bd);
+      av1_highbd_idct16_c(input, outptr, bd);
       input += 16;
       outptr += 16;
     }
   }
 
   if (optimised_cols) {
-    vp10_idct16_sse2(inptr, inptr + 16);
+    av1_idct16_sse2(inptr, inptr + 16);
 
     // Final round & shift and Reconstruction and Store
     {
@@ -4023,7 +4023,7 @@ void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
     tran_low_t temp_in[16], temp_out[16];
     for (i = 0; i < 16; ++i) {
       for (j = 0; j < 16; ++j) temp_in[j] = out[j * 16 + i];
-      vp10_highbd_idct16_c(temp_in, temp_out, bd);
+      av1_highbd_idct16_c(temp_in, temp_out, bd);
       for (j = 0; j < 16; ++j) {
         dest[j * stride + i] = highbd_clip_pixel_add(
             dest[j * stride + i], ROUND_POWER_OF_TWO(temp_out[j], 6), bd);
@@ -4031,4 +4031,4 @@ void vp10_highbd_idct16x16_10_add_sse2(const tran_low_t *input, uint8_t *dest8,
     }
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
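
A note on the high-bit-depth hunks ending here: each renamed N-by-N path first checks whether the intermediate coefficients fit the 16-bit SSE2 lanes and, if not, falls back to the scalar av1_highbd_idctN_c row kernels; the column pass then rounds the 1-D output by 2^5 (8x8) or 2^6 (16x16) and clips the sum into the destination. A minimal standalone sketch of that final round-and-clip step, with a hypothetical clip_pixel_add_highbd() standing in for the library's highbd_clip_pixel_add():

#include <stdint.h>

#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

/* Hypothetical stand-in for highbd_clip_pixel_add(): add a residual to a
 * high-bit-depth pixel and clamp the result to [0, 2^bd - 1]. */
static uint16_t clip_pixel_add_highbd(uint16_t pred, int32_t residual, int bd) {
  const int32_t max = (1 << bd) - 1;
  const int32_t v = (int32_t)pred + residual;
  return (uint16_t)(v < 0 ? 0 : (v > max ? max : v));
}

/* Column-pass reconstruction mirroring the 8x8 fallback loops above: the
 * 1-D column output is rounded by 2^5 and added to the prediction already
 * stored in dest (a 16-bit pixel buffer). */
static void reconstruct_col_8x8(const int32_t col_out[8], uint16_t *dest,
                                int stride, int col, int bd) {
  int j;
  for (j = 0; j < 8; ++j)
    dest[j * stride + col] = clip_pixel_add_highbd(
        dest[j * stride + col], ROUND_POWER_OF_TWO(col_out[j], 5), bd);
}
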
diff --git a/av1/common/x86/idct_intrin_sse2.c b/av1/common/x86/idct_intrin_sse2.c
index 4b948f9c4a7ea0e748571fd771f099518e83233c..2247b67faa08fff42147cbd5af08aacf0da5b093 100644
--- a/av1/common/x86/idct_intrin_sse2.c
+++ b/av1/common/x86/idct_intrin_sse2.c
@@ -13,7 +13,7 @@
 #include "aom_dsp/x86/txfm_common_sse2.h"
 #include "aom_ports/mem.h"
 
-void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
                              int tx_type) {
   __m128i in[2];
   const __m128i zero = _mm_setzero_si128();
@@ -76,7 +76,7 @@ void vp10_iht4x4_16_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
   }
 }
 
-void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
+void av1_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
                              int tx_type) {
   __m128i in[8];
   const __m128i zero = _mm_setzero_si128();
@@ -141,7 +141,7 @@ void vp10_iht8x8_64_add_sse2(const tran_low_t *input, uint8_t *dest, int stride,
   RECON_AND_STORE(dest + 7 * stride, in[7]);
 }
 
-void vp10_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
+void av1_iht16x16_256_add_sse2(const tran_low_t *input, uint8_t *dest,
                                 int stride, int tx_type) {
   __m128i in0[16], in1[16];
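
The RECON_AND_STORE step invoked at the end of av1_iht8x8_64_add_sse2 above adds a row of eight 16-bit residuals onto eight 8-bit destination pixels with unsigned saturation. A self-contained sketch of that pattern using plain SSE2 intrinsics (an illustration of the idea, not the library's macro verbatim):

#include <emmintrin.h> /* SSE2 */
#include <stdint.h>

/* Add eight 16-bit residuals to eight 8-bit pixels and store the result
 * with unsigned saturation; this is the per-row reconstruction used by
 * the inverse hybrid transforms above. */
static void recon_and_store8(uint8_t *dest, __m128i residual) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d = _mm_loadl_epi64((const __m128i *)dest); /* load 8 pixels   */
  d = _mm_unpacklo_epi8(d, zero);                     /* widen to 16 bit */
  d = _mm_add_epi16(d, residual);                     /* add residual    */
  d = _mm_packus_epi16(d, d);                         /* clamp to 0..255 */
  _mm_storel_epi64((__m128i *)dest, d);               /* store 8 pixels  */
}
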
 
diff --git a/av1/decoder/decodeframe.c b/av1/decoder/decodeframe.c
index 555032a6e29a156294d3400d44892d99a3bc809e..ec80ec6d2aa609bfa8ebce5350eb01bfad29103a 100644
--- a/av1/decoder/decodeframe.c
+++ b/av1/decoder/decodeframe.c
@@ -51,9 +51,9 @@
 #include "av1/decoder/decoder.h"
 #include "av1/decoder/dsubexp.h"
 
-#define MAX_VP10_HEADER_SIZE 80
+#define MAX_AV1_HEADER_SIZE 80
 
-static int is_compound_reference_allowed(const VP10_COMMON *cm) {
+static int is_compound_reference_allowed(const AV1_COMMON *cm) {
   int i;
   if (frame_is_intra_only(cm)) return 0;
   for (i = 1; i < REFS_PER_FRAME; ++i)
@@ -62,7 +62,7 @@ static int is_compound_reference_allowed(const VP10_COMMON *cm) {
   return 0;
 }
 
-static void setup_compound_reference_mode(VP10_COMMON *cm) {
+static void setup_compound_reference_mode(AV1_COMMON *cm) {
   if (cm->ref_frame_sign_bias[LAST_FRAME] ==
       cm->ref_frame_sign_bias[GOLDEN_FRAME]) {
     cm->comp_fixed_ref = ALTREF_FRAME;
@@ -106,34 +106,34 @@ static void read_tx_mode_probs(struct tx_probs *tx_probs, aom_reader *r) {
 
   for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
     for (j = 0; j < TX_SIZES - 3; ++j)
-      vp10_diff_update_prob(r, &tx_probs->p8x8[i][j]);
+      av1_diff_update_prob(r, &tx_probs->p8x8[i][j]);
 
   for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
     for (j = 0; j < TX_SIZES - 2; ++j)
-      vp10_diff_update_prob(r, &tx_probs->p16x16[i][j]);
+      av1_diff_update_prob(r, &tx_probs->p16x16[i][j]);
 
   for (i = 0; i < TX_SIZE_CONTEXTS; ++i)
     for (j = 0; j < TX_SIZES - 1; ++j)
-      vp10_diff_update_prob(r, &tx_probs->p32x32[i][j]);
+      av1_diff_update_prob(r, &tx_probs->p32x32[i][j]);
 }
 
 static void read_switchable_interp_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j;
   for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
     for (i = 0; i < SWITCHABLE_FILTERS - 1; ++i)
-      vp10_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
+      av1_diff_update_prob(r, &fc->switchable_interp_prob[j][i]);
 }
 
 static void read_inter_mode_probs(FRAME_CONTEXT *fc, aom_reader *r) {
   int i, j;
   for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
     for (j = 0; j < INTER_MODES - 1; ++j)
-      vp10_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
+      av1_diff_update_prob(r, &fc->inter_mode_probs[i][j]);
 }
 
 #if CONFIG_MISC_FIXES
 static REFERENCE_MODE read_frame_reference_mode(
-    const VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+    const AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   if (is_compound_reference_allowed(cm)) {
     return aom_rb_read_bit(rb)
                ? REFERENCE_MODE_SELECT
@@ -143,7 +143,7 @@ static REFERENCE_MODE read_frame_reference_mode(
   }
 }
 #else
-static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
+static REFERENCE_MODE read_frame_reference_mode(const AV1_COMMON *cm,
                                                 aom_reader *r) {
   if (is_compound_reference_allowed(cm)) {
     return aom_read_bit(r)
@@ -155,30 +155,30 @@ static REFERENCE_MODE read_frame_reference_mode(const VP10_COMMON *cm,
 }
 #endif
 
-static void read_frame_reference_mode_probs(VP10_COMMON *cm, aom_reader *r) {
+static void read_frame_reference_mode_probs(AV1_COMMON *cm, aom_reader *r) {
   FRAME_CONTEXT *const fc = cm->fc;
   int i;
 
   if (cm->reference_mode == REFERENCE_MODE_SELECT)
     for (i = 0; i < COMP_INTER_CONTEXTS; ++i)
-      vp10_diff_update_prob(r, &fc->comp_inter_prob[i]);
+      av1_diff_update_prob(r, &fc->comp_inter_prob[i]);
 
   if (cm->reference_mode != COMPOUND_REFERENCE)
     for (i = 0; i < REF_CONTEXTS; ++i) {
-      vp10_diff_update_prob(r, &fc->single_ref_prob[i][0]);
-      vp10_diff_update_prob(r, &fc->single_ref_prob[i][1]);
+      av1_diff_update_prob(r, &fc->single_ref_prob[i][0]);
+      av1_diff_update_prob(r, &fc->single_ref_prob[i][1]);
     }
 
   if (cm->reference_mode != SINGLE_REFERENCE)
     for (i = 0; i < REF_CONTEXTS; ++i)
-      vp10_diff_update_prob(r, &fc->comp_ref_prob[i]);
+      av1_diff_update_prob(r, &fc->comp_ref_prob[i]);
 }
 
 static void update_mv_probs(aom_prob *p, int n, aom_reader *r) {
   int i;
   for (i = 0; i < n; ++i)
 #if CONFIG_MISC_FIXES
-    vp10_diff_update_prob(r, &p[i]);
+    av1_diff_update_prob(r, &p[i]);
 #else
     if (aom_read(r, MV_UPDATE_PROB)) p[i] = (aom_read_literal(r, 7) << 1) | 1;
 #endif
@@ -221,48 +221,48 @@ static void inverse_transform_block_inter(MACROBLOCKD *xd, int plane,
   const int seg_id = xd->mi[0]->mbmi.segment_id;
   if (eob > 0) {
     tran_low_t *const dqcoeff = pd->dqcoeff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       switch (tx_size) {
         case TX_4X4:
-          vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
                                        tx_type, xd->lossless[seg_id]);
           break;
         case TX_8X8:
-          vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
                                        tx_type);
           break;
         case TX_16X16:
-          vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
                                          tx_type);
           break;
         case TX_32X32:
-          vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
                                          tx_type);
           break;
         default: assert(0 && "Invalid transform size"); return;
       }
     } else {
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       switch (tx_size) {
         case TX_4X4:
-          vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
+          av1_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
                                 xd->lossless[seg_id]);
           break;
         case TX_8X8:
-          vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
+          av1_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
           break;
         case TX_16X16:
-          vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
+          av1_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
           break;
         case TX_32X32:
-          vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
+          av1_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
           break;
         default: assert(0 && "Invalid transform size"); return;
       }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     if (eob == 1) {
       dqcoeff[0] = 0;
@@ -285,48 +285,48 @@ static void inverse_transform_block_intra(MACROBLOCKD *xd, int plane,
   const int seg_id = xd->mi[0]->mbmi.segment_id;
   if (eob > 0) {
     tran_low_t *const dqcoeff = pd->dqcoeff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       switch (tx_size) {
         case TX_4X4:
-          vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, xd->bd,
                                        tx_type, xd->lossless[seg_id]);
           break;
         case TX_8X8:
-          vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, xd->bd,
                                        tx_type);
           break;
         case TX_16X16:
-          vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, xd->bd,
                                          tx_type);
           break;
         case TX_32X32:
-          vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
+          av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, xd->bd,
                                          tx_type);
           break;
         default: assert(0 && "Invalid transform size"); return;
       }
     } else {
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       switch (tx_size) {
         case TX_4X4:
-          vp10_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
+          av1_inv_txfm_add_4x4(dqcoeff, dst, stride, eob, tx_type,
                                 xd->lossless[seg_id]);
           break;
         case TX_8X8:
-          vp10_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
+          av1_inv_txfm_add_8x8(dqcoeff, dst, stride, eob, tx_type);
           break;
         case TX_16X16:
-          vp10_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
+          av1_inv_txfm_add_16x16(dqcoeff, dst, stride, eob, tx_type);
           break;
         case TX_32X32:
-          vp10_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
+          av1_inv_txfm_add_32x32(dqcoeff, dst, stride, eob, tx_type);
           break;
         default: assert(0 && "Invalid transform size"); return;
       }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     if (eob == 1) {
       dqcoeff[0] = 0;
@@ -356,14 +356,14 @@ static void predict_and_reconstruct_intra_block(MACROBLOCKD *const xd,
   if (mbmi->sb_type < BLOCK_8X8)
     if (plane == 0) mode = xd->mi[0]->bmi[(row << 1) + col].as_mode;
 
-  vp10_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
+  av1_predict_intra_block(xd, pd->n4_wl, pd->n4_hl, tx_size, mode, dst,
                            pd->dst.stride, dst, pd->dst.stride, col, row,
                            plane);
 
   if (!mbmi->skip) {
     TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
     const scan_order *sc = get_scan(tx_size, tx_type);
-    const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size,
+    const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size,
                                              r, mbmi->segment_id);
     inverse_transform_block_intra(xd, plane, tx_type, tx_size, dst,
                                   pd->dst.stride, eob);
@@ -378,7 +378,7 @@ static int reconstruct_inter_block(MACROBLOCKD *const xd, aom_reader *r,
   int block_idx = (row << 1) + col;
   TX_TYPE tx_type = get_tx_type(plane_type, xd, block_idx);
   const scan_order *sc = get_scan(tx_size, tx_type);
-  const int eob = vp10_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
+  const int eob = av1_decode_block_tokens(xd, plane, sc, col, row, tx_size, r,
                                            mbmi->segment_id);
 
   inverse_transform_block_inter(
@@ -423,7 +423,7 @@ static void build_mc_border(const uint8_t *src, int src_stride, uint8_t *dst,
   } while (--b_h);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void high_build_mc_border(const uint8_t *src8, int src_stride,
                                  uint16_t *dst, int dst_stride, int x, int y,
                                  int b_w, int b_h, int w, int h) {
@@ -460,9 +460,9 @@ static void high_build_mc_border(const uint8_t *src8, int src_stride,
     if (y > 0 && y < h) ref_row += src_stride;
   } while (--b_h);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                                int x0, int y0, int b_w, int b_h,
                                int frame_width, int frame_height,
@@ -511,15 +511,15 @@ static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
   inter_predictor(buf_ptr, b_w, dst, dst_buf_stride, subpel_x, subpel_y, sf, w,
                   h, ref, kernel, xs, ys);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static void dec_build_inter_predictors(
-    VP10Decoder *const pbi, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
+    AV1Decoder *const pbi, MACROBLOCKD *xd, int plane, int bw, int bh, int x,
     int y, int w, int h, int mi_x, int mi_y, const InterpKernel *kernel,
     const struct scale_factors *sf, struct buf_2d *pre_buf,
     struct buf_2d *dst_buf, const MV *mv, RefCntBuffer *ref_frame_buf,
     int is_scaled, int ref) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   struct macroblockd_plane *const pd = &xd->plane[plane];
   uint8_t *const dst = dst_buf->buf + dst_buf->stride * y + x;
   MV32 scaled_mv;
@@ -561,7 +561,7 @@ static void dec_build_inter_predictors(
 
     // Scale the MV and incorporate the sub-pixel offset of the block
     // in the reference frame.
-    scaled_mv = vp10_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
+    scaled_mv = av1_scale_mv(&mv_q4, mi_x + x, mi_y + y, sf);
     xs = sf->x_step_q4;
     ys = sf->y_step_q4;
   } else {
@@ -616,7 +616,7 @@ static void dec_build_inter_predictors(
     // Wait until reference block is ready. Pad 7 more pixels as last 7
     // pixels of each superblock row can be changed by next superblock row.
     if (cm->frame_parallel_decode)
-      vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+      av1_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                             VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
 
     // Skip border extension if block is inside the frame.
@@ -631,7 +631,7 @@ static void dec_build_inter_predictors(
       extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h, frame_width,
                          frame_height, border_offset, dst, dst_buf->stride,
                          subpel_x, subpel_y, kernel, sf,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                          xd,
 #endif
                          w, h, ref, xs, ys);
@@ -642,11 +642,11 @@ static void dec_build_inter_predictors(
     // pixels of each superblock row can be changed by next superblock row.
     if (cm->frame_parallel_decode) {
       const int y1 = (y0_16 + (h - 1) * ys) >> SUBPEL_BITS;
-      vp10_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
+      av1_frameworker_wait(pbi->frame_worker_owner, ref_frame_buf,
                             VPXMAX(0, (y1 + 7)) << (plane == 0 ? 0 : 1));
     }
   }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     high_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x,
                          subpel_y, sf, w, h, ref, kernel, xs, ys, xd->bd);
@@ -657,17 +657,17 @@ static void dec_build_inter_predictors(
 #else
   inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride, subpel_x, subpel_y,
                   sf, w, h, ref, kernel, xs, ys);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
-static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
+static void dec_build_inter_predictors_sb(AV1Decoder *const pbi,
                                           MACROBLOCKD *xd, int mi_row,
                                           int mi_col) {
   int plane;
   const int mi_x = mi_col * MI_SIZE;
   const int mi_y = mi_row * MI_SIZE;
   const MODE_INFO *mi = xd->mi[0];
-  const InterpKernel *kernel = vp10_filter_kernels[mi->mbmi.interp_filter];
+  const InterpKernel *kernel = av1_filter_kernels[mi->mbmi.interp_filter];
   const BLOCK_SIZE sb_type = mi->mbmi.sb_type;
   const int is_compound = has_second_ref(&mi->mbmi);
 
@@ -687,7 +687,7 @@ static void dec_build_inter_predictors_sb(VP10Decoder *const pbi,
       const int idx = xd->block_refs[ref]->idx;
       BufferPool *const pool = pbi->common.buffer_pool;
       RefCntBuffer *const ref_frame_buf = &pool->frame_bufs[idx];
-      const int is_scaled = vp10_is_scaled(sf);
+      const int is_scaled = av1_is_scaled(sf);
 
       if (sb_type < BLOCK_8X8) {
         const PARTITION_TYPE bp = BLOCK_8X8 - sb_type;
@@ -744,7 +744,7 @@ static void set_plane_n4(MACROBLOCKD *const xd, int bw, int bh, int bwl,
   }
 }
 
-static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static MB_MODE_INFO *set_offsets(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int bw, int bh, int x_mis, int y_mis, int bwl,
                                  int bhl) {
@@ -770,14 +770,14 @@ static MB_MODE_INFO *set_offsets(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   // as they are always compared to values that are in 1/8th pel units
   set_mi_row_col(xd, tile, mi_row, bh, mi_col, bw, cm->mi_rows, cm->mi_cols);
 
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
   return &xd->mi[0]->mbmi;
 }
 
-static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_block(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                          int mi_row, int mi_col, aom_reader *r,
                          BLOCK_SIZE bsize, int bwl, int bhl) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const int less8x8 = bsize < BLOCK_8X8;
   const int bw = 1 << (bwl - 1);
   const int bh = 1 << (bhl - 1);
@@ -795,7 +795,7 @@ static void decode_block(VP10Decoder *const pbi, MACROBLOCKD *const xd,
                          "Invalid block size.");
   }
 
-  vp10_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
+  av1_read_mode_info(pbi, xd, mi_row, mi_col, r, x_mis, y_mis);
 
   if (mbmi->skip) {
     dec_reset_skip_context(xd);
@@ -895,7 +895,7 @@ static INLINE void dec_update_partition_context(MACROBLOCKD *xd, int mi_row,
   memset(left_ctx, partition_context_lookup[subsize].left, bw);
 }
 
-static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PARTITION_TYPE read_partition(AV1_COMMON *cm, MACROBLOCKD *xd,
                                      int mi_row, int mi_col, aom_reader *r,
                                      int has_rows, int has_cols, int bsl) {
   const int ctx = dec_partition_plane_context(xd, mi_row, mi_col, bsl);
@@ -904,7 +904,7 @@ static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
   PARTITION_TYPE p;
 
   if (has_rows && has_cols)
-    p = (PARTITION_TYPE)aom_read_tree(r, vp10_partition_tree, probs);
+    p = (PARTITION_TYPE)aom_read_tree(r, av1_partition_tree, probs);
   else if (!has_rows && has_cols)
     p = aom_read(r, probs[1]) ? PARTITION_SPLIT : PARTITION_HORZ;
   else if (has_rows && !has_cols)
@@ -918,10 +918,10 @@ static PARTITION_TYPE read_partition(VP10_COMMON *cm, MACROBLOCKD *xd,
 }
 
 // TODO(slavarnway): eliminate bsize and subsize in future commits
-static void decode_partition(VP10Decoder *const pbi, MACROBLOCKD *const xd,
+static void decode_partition(AV1Decoder *const pbi, MACROBLOCKD *const xd,
                              int mi_row, int mi_col, aom_reader *r,
                              BLOCK_SIZE bsize, int n4x4_l2) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const int n8x8_l2 = n4x4_l2 - 1;
   const int num_8x8_wh = 1 << n8x8_l2;
   const int hbs = num_8x8_wh >> 1;
@@ -1002,7 +1002,7 @@ static void setup_token_decoder(const uint8_t *data, const uint8_t *data_end,
                        "Failed to allocate bool decoder %d", 1);
 }
 
-static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
+static void read_coef_probs_common(av1_coeff_probs_model *coef_probs,
                                    aom_reader *r) {
   int i, j, k, l, m;
 
@@ -1012,7 +1012,7 @@ static void read_coef_probs_common(vp10_coeff_probs_model *coef_probs,
         for (k = 0; k < COEF_BANDS; ++k)
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l)
             for (m = 0; m < UNCONSTRAINED_NODES; ++m)
-              vp10_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
+              av1_diff_update_prob(r, &coef_probs[i][j][k][l][m]);
 }
 
 static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
@@ -1022,7 +1022,7 @@ static void read_coef_probs(FRAME_CONTEXT *fc, TX_MODE tx_mode, aom_reader *r) {
     read_coef_probs_common(fc->coef_probs[tx_size], r);
 }
 
-static void setup_segmentation(VP10_COMMON *const cm,
+static void setup_segmentation(AV1_COMMON *const cm,
                                struct aom_read_bit_buffer *rb) {
   struct segmentation *const seg = &cm->seg;
 #if !CONFIG_MISC_FIXES
@@ -1069,19 +1069,19 @@ static void setup_segmentation(VP10_COMMON *const cm,
   if (seg->update_data) {
     seg->abs_delta = aom_rb_read_bit(rb);
 
-    vp10_clearall_segfeatures(seg);
+    av1_clearall_segfeatures(seg);
 
     for (i = 0; i < MAX_SEGMENTS; i++) {
       for (j = 0; j < SEG_LVL_MAX; j++) {
         int data = 0;
         const int feature_enabled = aom_rb_read_bit(rb);
         if (feature_enabled) {
-          vp10_enable_segfeature(seg, i, j);
-          data = decode_unsigned_max(rb, vp10_seg_feature_data_max(j));
-          if (vp10_is_segfeature_signed(j))
+          av1_enable_segfeature(seg, i, j);
+          data = decode_unsigned_max(rb, av1_seg_feature_data_max(j));
+          if (av1_is_segfeature_signed(j))
             data = aom_rb_read_bit(rb) ? -data : data;
         }
-        vp10_set_segdata(seg, i, j, data);
+        av1_set_segdata(seg, i, j, data);
       }
     }
   }
@@ -1114,13 +1114,13 @@ static void setup_loopfilter(struct loopfilter *lf,
 }
 
 #if CONFIG_CLPF
-static void setup_clpf(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_clpf(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   cm->clpf = aom_rb_read_literal(rb, 1);
 }
 #endif
 
 #if CONFIG_DERING
-static void setup_dering(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_dering(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   cm->dering_level = aom_rb_read_literal(rb,  DERING_LEVEL_BITS);
 }
 #endif  // CONFIG_DERING
@@ -1131,7 +1131,7 @@ static INLINE int read_delta_q(struct aom_read_bit_buffer *rb) {
              : 0;
 }
 
-static void setup_quantization(VP10_COMMON *const cm,
+static void setup_quantization(AV1_COMMON *const cm,
                                struct aom_read_bit_buffer *rb) {
   cm->base_qindex = aom_rb_read_literal(rb, QINDEX_BITS);
   cm->y_dc_delta_q = read_delta_q(rb);
@@ -1150,7 +1150,7 @@ static void setup_quantization(VP10_COMMON *const cm,
 #endif
 }
 
-static void setup_segmentation_dequant(VP10_COMMON *const cm) {
+static void setup_segmentation_dequant(AV1_COMMON *const cm) {
   // Build y/uv dequant values based on segmentation.
   int i = 0;
 #if CONFIG_AOM_QM
@@ -1163,14 +1163,14 @@ static void setup_segmentation_dequant(VP10_COMMON *const cm) {
 #endif
   if (cm->seg.enabled) {
     for (i = 0; i < MAX_SEGMENTS; ++i) {
-      const int qindex = vp10_get_qindex(&cm->seg, i, cm->base_qindex);
+      const int qindex = av1_get_qindex(&cm->seg, i, cm->base_qindex);
       cm->y_dequant[i][0] =
-          vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
-      cm->y_dequant[i][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+          av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+      cm->y_dequant[i][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
       cm->uv_dequant[i][0] =
-          vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+          av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
       cm->uv_dequant[i][1] =
-          vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+          av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
 #if CONFIG_AOM_QM
       lossless = qindex == 0 && cm->y_dc_delta_q == 0 &&
                  cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -1192,12 +1192,12 @@ static void setup_segmentation_dequant(VP10_COMMON *const cm) {
     // When segmentation is disabled, only the first value is used.  The
     // remaining are don't cares.
     cm->y_dequant[0][0] =
-        vp10_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
-    cm->y_dequant[0][1] = vp10_ac_quant(qindex, 0, cm->bit_depth);
+        av1_dc_quant(qindex, cm->y_dc_delta_q, cm->bit_depth);
+    cm->y_dequant[0][1] = av1_ac_quant(qindex, 0, cm->bit_depth);
     cm->uv_dequant[0][0] =
-        vp10_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
+        av1_dc_quant(qindex, cm->uv_dc_delta_q, cm->bit_depth);
     cm->uv_dequant[0][1] =
-        vp10_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
+        av1_ac_quant(qindex, cm->uv_ac_delta_q, cm->bit_depth);
 #if CONFIG_AOM_QM
     lossless = qindex == 0 && cm->y_dc_delta_q == 0 && cm->uv_dc_delta_q == 0 &&
                cm->uv_ac_delta_q == 0;
@@ -1219,14 +1219,14 @@ static INTERP_FILTER read_interp_filter(struct aom_read_bit_buffer *rb) {
   return aom_rb_read_bit(rb) ? SWITCHABLE : aom_rb_read_literal(rb, 2);
 }
 
-static void setup_render_size(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_render_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   cm->render_width = cm->width;
   cm->render_height = cm->height;
   if (aom_rb_read_bit(rb))
-    vp10_read_frame_size(rb, &cm->render_width, &cm->render_height);
+    av1_read_frame_size(rb, &cm->render_width, &cm->render_height);
 }
 
-static void resize_mv_buffer(VP10_COMMON *cm) {
+static void resize_mv_buffer(AV1_COMMON *cm) {
   aom_free(cm->cur_frame->mvs);
   cm->cur_frame->mi_rows = cm->mi_rows;
   cm->cur_frame->mi_cols = cm->mi_cols;
@@ -1234,7 +1234,7 @@ static void resize_mv_buffer(VP10_COMMON *cm) {
                                             sizeof(*cm->cur_frame->mvs));
 }
 
-static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
+static void resize_context_buffers(AV1_COMMON *cm, int width, int height) {
 #if CONFIG_SIZE_LIMIT
   if (width > DECODE_WIDTH_LIMIT || height > DECODE_HEIGHT_LIMIT)
     aom_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
@@ -1247,16 +1247,16 @@ static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
     const int new_mi_cols =
         ALIGN_POWER_OF_TWO(width, MI_SIZE_LOG2) >> MI_SIZE_LOG2;
 
-    // Allocations in vp10_alloc_context_buffers() depend on individual
+    // Allocations in av1_alloc_context_buffers() depend on individual
     // dimensions as well as the overall size.
     if (new_mi_cols > cm->mi_cols || new_mi_rows > cm->mi_rows) {
-      if (vp10_alloc_context_buffers(cm, width, height))
+      if (av1_alloc_context_buffers(cm, width, height))
         aom_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
                            "Failed to allocate context buffers");
     } else {
-      vp10_set_mb_mi(cm, width, height);
+      av1_set_mb_mi(cm, width, height);
     }
-    vp10_init_context_buffers(cm);
+    av1_init_context_buffers(cm);
     cm->width = width;
     cm->height = height;
   }
@@ -1266,10 +1266,10 @@ static void resize_context_buffers(VP10_COMMON *cm, int width, int height) {
   }
 }
 
-static void setup_frame_size(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_frame_size(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   int width, height;
   BufferPool *const pool = cm->buffer_pool;
-  vp10_read_frame_size(rb, &width, &height);
+  av1_read_frame_size(rb, &width, &height);
   resize_context_buffers(cm, width, height);
   setup_render_size(cm, rb);
 
@@ -1277,7 +1277,7 @@ static void setup_frame_size(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
   if (aom_realloc_frame_buffer(
           get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
           cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
           VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -1306,7 +1306,7 @@ static INLINE int valid_ref_frame_img_fmt(aom_bit_depth_t ref_bit_depth,
          ref_yss == this_yss;
 }
 
-static void setup_frame_size_with_refs(VP10_COMMON *cm,
+static void setup_frame_size_with_refs(AV1_COMMON *cm,
                                        struct aom_read_bit_buffer *rb) {
   int width, height;
   int found = 0, i;
@@ -1327,7 +1327,7 @@ static void setup_frame_size_with_refs(VP10_COMMON *cm,
   }
 
   if (!found) {
-    vp10_read_frame_size(rb, &width, &height);
+    av1_read_frame_size(rb, &width, &height);
 #if CONFIG_MISC_FIXES
     setup_render_size(cm, rb);
 #endif
@@ -1367,7 +1367,7 @@ static void setup_frame_size_with_refs(VP10_COMMON *cm,
   if (aom_realloc_frame_buffer(
           get_frame_new_buffer(cm), cm->width, cm->height, cm->subsampling_x,
           cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           cm->use_highbitdepth,
 #endif
           VPX_DEC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -1388,9 +1388,9 @@ static void setup_frame_size_with_refs(VP10_COMMON *cm,
   pool->frame_bufs[cm->new_fb_idx].buf.render_height = cm->render_height;
 }
 
-static void setup_tile_info(VP10_COMMON *cm, struct aom_read_bit_buffer *rb) {
+static void setup_tile_info(AV1_COMMON *cm, struct aom_read_bit_buffer *rb) {
   int min_log2_tile_cols, max_log2_tile_cols, max_ones;
-  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
   // columns
   max_ones = max_log2_tile_cols - min_log2_tile_cols;
@@ -1470,7 +1470,7 @@ static void get_tile_buffer(const uint8_t *const data_end,
   *data += size;
 }
 
-static void get_tile_buffers(VP10Decoder *pbi, const uint8_t *data,
+static void get_tile_buffers(AV1Decoder *pbi, const uint8_t *data,
                              const uint8_t *data_end, int tile_cols,
                              int tile_rows,
                              TileBuffer (*tile_buffers)[1 << 6]) {
@@ -1488,9 +1488,9 @@ static void get_tile_buffers(VP10Decoder *pbi, const uint8_t *data,
   }
 }
 
-static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles(AV1Decoder *pbi, const uint8_t *data,
                                    const uint8_t *data_end) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const VPxWorkerInterface *const winterface = aom_get_worker_interface();
   const int aligned_cols = mi_cols_aligned_to_sb(cm->mi_cols);
   const int tile_cols = 1 << cm->log2_tile_cols;
@@ -1504,7 +1504,7 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
       pbi->lf_worker.data1 == NULL) {
     CHECK_MEM_ERROR(cm, pbi->lf_worker.data1,
                     aom_memalign(32, sizeof(LFWorkerData)));
-    pbi->lf_worker.hook = (VPxWorkerHook)vp10_loop_filter_worker;
+    pbi->lf_worker.hook = (VPxWorkerHook)av1_loop_filter_worker;
     if (pbi->max_threads > 1 && !winterface->reset(&pbi->lf_worker)) {
       aom_internal_error(&cm->error, VPX_CODEC_ERROR,
                          "Loop filter thread creation failed");
@@ -1515,7 +1515,7 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
     LFWorkerData *const lf_data = (LFWorkerData *)pbi->lf_worker.data1;
     // Be sure to sync as we might be resuming after a failed frame decode.
     winterface->sync(&pbi->lf_worker);
-    vp10_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
+    av1_loop_filter_data_reset(lf_data, get_frame_new_buffer(cm), cm,
                                 pbi->mb.plane);
   }
 
@@ -1552,12 +1552,12 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
           cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
               ? &cm->counts
               : NULL;
-      vp10_zero(tile_data->dqcoeff);
-      vp10_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
+      av1_zero(tile_data->dqcoeff);
+      av1_tile_init(&tile_data->xd.tile, tile_data->cm, tile_row, tile_col);
       setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                           &tile_data->bit_reader, pbi->decrypt_cb,
                           pbi->decrypt_state);
-      vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
+      av1_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
       tile_data->xd.plane[0].color_index_map = tile_data->color_index_map[0];
       tile_data->xd.plane[1].color_index_map = tile_data->color_index_map[1];
     }
@@ -1565,16 +1565,16 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
 
   for (tile_row = 0; tile_row < tile_rows; ++tile_row) {
     TileInfo tile;
-    vp10_tile_set_row(&tile, cm, tile_row);
+    av1_tile_set_row(&tile, cm, tile_row);
     for (mi_row = tile.mi_row_start; mi_row < tile.mi_row_end;
          mi_row += MI_BLOCK_SIZE) {
       for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
         const int col =
             pbi->inv_tile_order ? tile_cols - tile_col - 1 : tile_col;
         tile_data = pbi->tile_data + tile_cols * tile_row + col;
-        vp10_tile_set_col(&tile, tile_data->cm, col);
-        vp10_zero(tile_data->xd.left_context);
-        vp10_zero(tile_data->xd.left_seg_context);
+        av1_tile_set_col(&tile, tile_data->cm, col);
+        av1_zero(tile_data->xd.left_context);
+        av1_zero(tile_data->xd.left_seg_context);
         for (mi_col = tile.mi_col_start; mi_col < tile.mi_col_end;
              mi_col += MI_BLOCK_SIZE) {
           decode_partition(pbi, &tile_data->xd, mi_row, mi_col,
@@ -1609,7 +1609,7 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
       // still be changed by the longest loopfilter of the next superblock
       // row.
       if (cm->frame_parallel_decode)
-        vp10_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2);
+        av1_frameworker_broadcast(pbi->cur_buf, mi_row << MI_BLOCK_SIZE_LOG2);
     }
   }
 
@@ -1623,11 +1623,11 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
   }
 #if CONFIG_CLPF
   if (cm->clpf && !cm->skip_loop_filter)
-    vp10_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
+    av1_clpf_frame(&pbi->cur_buf->buf, cm, &pbi->mb);
 #endif
 #if CONFIG_DERING
   if (cm->dering_level && !cm->skip_loop_filter) {
-    vp10_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
+    av1_dering_frame(&pbi->cur_buf->buf, cm, &pbi->mb, cm->dering_level);
   }
 #endif  // CONFIG_DERING
 
@@ -1635,7 +1635,7 @@ static const uint8_t *decode_tiles(VP10Decoder *pbi, const uint8_t *data,
   tile_data = pbi->tile_data + tile_cols * tile_rows - 1;
 
   if (cm->frame_parallel_decode)
-    vp10_frameworker_broadcast(pbi->cur_buf, INT_MAX);
+    av1_frameworker_broadcast(pbi->cur_buf, INT_MAX);
   return aom_reader_find_end(&tile_data->bit_reader);
 }
 
@@ -1654,8 +1654,8 @@ static int tile_worker_hook(TileWorkerData *const tile_data,
 
   for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
        mi_row += MI_BLOCK_SIZE) {
-    vp10_zero(tile_data->xd.left_context);
-    vp10_zero(tile_data->xd.left_seg_context);
+    av1_zero(tile_data->xd.left_context);
+    av1_zero(tile_data->xd.left_seg_context);
     for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
          mi_col += MI_BLOCK_SIZE) {
       decode_partition(tile_data->pbi, &tile_data->xd, mi_row, mi_col,
@@ -1672,9 +1672,9 @@ static int compare_tile_buffers(const void *a, const void *b) {
   return (int)(buf2->size - buf1->size);
 }
 
-static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
+static const uint8_t *decode_tiles_mt(AV1Decoder *pbi, const uint8_t *data,
                                       const uint8_t *data_end) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   const VPxWorkerInterface *const winterface = aom_get_worker_interface();
   const uint8_t *bit_reader_end = NULL;
   const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
@@ -1762,7 +1762,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
     for (i = 0; i < num_workers; ++i) {
       TileWorkerData *const tile_data =
           (TileWorkerData *)pbi->tile_workers[i].data1;
-      vp10_zero(tile_data->counts);
+      av1_zero(tile_data->counts);
     }
   }
 
@@ -1782,13 +1782,13 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
           cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD
               ? &tile_data->counts
               : NULL;
-      vp10_zero(tile_data->dqcoeff);
-      vp10_tile_init(tile, cm, 0, buf->col);
-      vp10_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
+      av1_zero(tile_data->dqcoeff);
+      av1_tile_init(tile, cm, 0, buf->col);
+      av1_tile_init(&tile_data->xd.tile, cm, 0, buf->col);
       setup_token_decoder(buf->data, data_end, buf->size, &cm->error,
                           &tile_data->bit_reader, pbi->decrypt_cb,
                           pbi->decrypt_state);
-      vp10_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
+      av1_init_macroblockd(cm, &tile_data->xd, tile_data->dqcoeff);
       tile_data->xd.plane[0].color_index_map = tile_data->color_index_map[0];
       tile_data->xd.plane[1].color_index_map = tile_data->color_index_map[1];
 
@@ -1827,7 +1827,7 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
       for (i = 0; i < num_workers; ++i) {
         TileWorkerData *const tile_data =
             (TileWorkerData *)pbi->tile_workers[i].data1;
-        vp10_accumulate_frame_counts(cm, &tile_data->counts, 1);
+        av1_accumulate_frame_counts(cm, &tile_data->counts, 1);
       }
     }
   }
@@ -1836,20 +1836,20 @@ static const uint8_t *decode_tiles_mt(VP10Decoder *pbi, const uint8_t *data,
 }
 
 static void error_handler(void *data) {
-  VP10_COMMON *const cm = (VP10_COMMON *)data;
+  AV1_COMMON *const cm = (AV1_COMMON *)data;
   aom_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME, "Truncated packet");
 }
 
-static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
+static void read_bitdepth_colorspace_sampling(AV1_COMMON *cm,
                                               struct aom_read_bit_buffer *rb) {
   if (cm->profile >= PROFILE_2) {
     cm->bit_depth = aom_rb_read_bit(rb) ? VPX_BITS_12 : VPX_BITS_10;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     cm->use_highbitdepth = 1;
 #endif
   } else {
     cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     cm->use_highbitdepth = 0;
 #endif
   }
@@ -1884,9 +1884,9 @@ static void read_bitdepth_colorspace_sampling(VP10_COMMON *cm,
   }
 }
 
-static size_t read_uncompressed_header(VP10Decoder *pbi,
+static size_t read_uncompressed_header(AV1Decoder *pbi,
                                        struct aom_read_bit_buffer *rb) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   BufferPool *const pool = cm->buffer_pool;
   RefCntBuffer *const frame_bufs = pool->frame_bufs;
@@ -1900,8 +1900,8 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
     aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                        "Invalid frame marker");
 
-  cm->profile = vp10_read_profile(rb);
-#if CONFIG_VPX_HIGHBITDEPTH
+  cm->profile = av1_read_profile(rb);
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->profile >= MAX_PROFILES)
     aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                        "Unsupported bitstream profile");
@@ -1941,7 +1941,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
   cm->error_resilient_mode = aom_rb_read_bit(rb);
 
   if (cm->frame_type == KEY_FRAME) {
-    if (!vp10_read_sync_code(rb))
+    if (!av1_read_sync_code(rb))
       aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                          "Invalid frame sync code");
 
@@ -1990,7 +1990,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
     }
 
     if (cm->intra_only) {
-      if (!vp10_read_sync_code(rb))
+      if (!av1_read_sync_code(rb))
         aom_internal_error(&cm->error, VPX_CODEC_UNSUP_BITSTREAM,
                            "Invalid frame sync code");
 #if CONFIG_MISC_FIXES
@@ -2000,14 +2000,14 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
         read_bitdepth_colorspace_sampling(cm, rb);
       } else {
         // NOTE: The intra-only frame header does not include the specification
-        // of either the color format or color sub-sampling in profile 0. VP10
+        // of either the color format or color sub-sampling in profile 0. AV1
         // specifies that the default color format should be YUV 4:2:0 in this
         // case (normative).
         cm->color_space = VPX_CS_BT_601;
         cm->color_range = 0;
         cm->subsampling_y = cm->subsampling_x = 1;
         cm->bit_depth = VPX_BITS_8;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         cm->use_highbitdepth = 0;
 #endif
       }
@@ -2037,20 +2037,20 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
 
       for (i = 0; i < REFS_PER_FRAME; ++i) {
         RefBuffer *const ref_buf = &cm->frame_refs[i];
-#if CONFIG_VPX_HIGHBITDEPTH
-        vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+        av1_setup_scale_factors_for_frame(
             &ref_buf->sf, ref_buf->buf->y_crop_width,
             ref_buf->buf->y_crop_height, cm->width, cm->height,
             cm->use_highbitdepth);
 #else
-        vp10_setup_scale_factors_for_frame(
+        av1_setup_scale_factors_for_frame(
             &ref_buf->sf, ref_buf->buf->y_crop_width,
             ref_buf->buf->y_crop_height, cm->width, cm->height);
 #endif
       }
     }
   }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   get_frame_new_buffer(cm)->bit_depth = cm->bit_depth;
 #endif
   get_frame_new_buffer(cm)->color_space = cm->color_space;
@@ -2081,7 +2081,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
     cm->refresh_frame_context = REFRESH_FRAME_CONTEXT_OFF;
   }
 
-  // This flag will be overridden by the call to vp10_setup_past_independence
+  // This flag will be overridden by the call to av1_setup_past_independence
   // below, forcing the use of context 0 for those frame types.
   cm->frame_context_idx = aom_rb_read_literal(rb, FRAME_CONTEXTS_LOG2);
 
@@ -2110,7 +2110,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
   pbi->hold_ref_buf = 1;
 
   if (frame_is_intra_only(cm) || cm->error_resilient_mode)
-    vp10_setup_past_independence(cm);
+    av1_setup_past_independence(cm);
 
   setup_loopfilter(&cm->lf, rb);
 #if CONFIG_CLPF
@@ -2120,7 +2120,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
   setup_dering(cm, rb);
 #endif
   setup_quantization(cm, rb);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   xd->bd = (int)cm->bit_depth;
 #endif
 
@@ -2130,7 +2130,7 @@ static size_t read_uncompressed_header(VP10Decoder *pbi,
     int i;
     for (i = 0; i < MAX_SEGMENTS; ++i) {
       const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
-                             ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                             ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
                              : cm->base_qindex;
       xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
                         cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2160,20 +2160,20 @@ static void read_ext_tx_probs(FRAME_CONTEXT *fc, aom_reader *r) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (j = 0; j < TX_TYPES; ++j)
         for (k = 0; k < TX_TYPES - 1; ++k)
-          vp10_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
+          av1_diff_update_prob(r, &fc->intra_ext_tx_prob[i][j][k]);
     }
   }
   if (aom_read(r, GROUP_DIFF_UPDATE_PROB)) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (k = 0; k < TX_TYPES - 1; ++k)
-        vp10_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
+        av1_diff_update_prob(r, &fc->inter_ext_tx_prob[i][k]);
     }
   }
 }
 
-static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
+static int read_compressed_header(AV1Decoder *pbi, const uint8_t *data,
                                   size_t partition_size) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
 #if !CONFIG_MISC_FIXES
   MACROBLOCKD *const xd = &pbi->mb;
 #endif
@@ -2193,34 +2193,34 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
   read_coef_probs(fc, cm->tx_mode, &r);
 
   for (k = 0; k < SKIP_CONTEXTS; ++k)
-    vp10_diff_update_prob(&r, &fc->skip_probs[k]);
+    av1_diff_update_prob(&r, &fc->skip_probs[k]);
 
 #if CONFIG_MISC_FIXES
   if (cm->seg.enabled) {
     if (cm->seg.temporal_update) {
       for (k = 0; k < PREDICTION_PROBS; k++)
-        vp10_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
+        av1_diff_update_prob(&r, &cm->fc->seg.pred_probs[k]);
     }
     for (k = 0; k < MAX_SEGMENTS - 1; k++)
-      vp10_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
+      av1_diff_update_prob(&r, &cm->fc->seg.tree_probs[k]);
   }
 
   for (j = 0; j < INTRA_MODES; j++)
     for (i = 0; i < INTRA_MODES - 1; ++i)
-      vp10_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
+      av1_diff_update_prob(&r, &fc->uv_mode_prob[j][i]);
 
   for (j = 0; j < PARTITION_CONTEXTS; ++j)
     for (i = 0; i < PARTITION_TYPES - 1; ++i)
-      vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+      av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
 #endif
 
   if (frame_is_intra_only(cm)) {
-    vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+    av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
 #if CONFIG_MISC_FIXES
     for (k = 0; k < INTRA_MODES; k++)
       for (j = 0; j < INTRA_MODES; j++)
         for (i = 0; i < INTRA_MODES - 1; ++i)
-          vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
+          av1_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
 #endif
   } else {
     nmv_context *const nmvc = &fc->nmvc;
@@ -2230,7 +2230,7 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
     if (cm->interp_filter == SWITCHABLE) read_switchable_interp_probs(fc, &r);
 
     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
-      vp10_diff_update_prob(&r, &fc->intra_inter_prob[i]);
+      av1_diff_update_prob(&r, &fc->intra_inter_prob[i]);
 
 #if !CONFIG_MISC_FIXES
     cm->reference_mode = read_frame_reference_mode(cm, &r);
@@ -2241,12 +2241,12 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
 
     for (j = 0; j < BLOCK_SIZE_GROUPS; j++)
       for (i = 0; i < INTRA_MODES - 1; ++i)
-        vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
+        av1_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
 
 #if !CONFIG_MISC_FIXES
     for (j = 0; j < PARTITION_CONTEXTS; ++j)
       for (i = 0; i < PARTITION_TYPES - 1; ++i)
-        vp10_diff_update_prob(&r, &fc->partition_prob[j][i]);
+        av1_diff_update_prob(&r, &fc->partition_prob[j][i]);
 #endif
 
     read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
@@ -2261,9 +2261,9 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
 #else   // !NDEBUG
 // Counts should only be incremented when frame_parallel_decoding_mode and
 // error_resilient_mode are disabled.
-static void debug_check_frame_counts(const VP10_COMMON *const cm) {
+static void debug_check_frame_counts(const AV1_COMMON *const cm) {
   FRAME_COUNTS zero_counts;
-  vp10_zero(zero_counts);
+  av1_zero(zero_counts);
   assert(cm->refresh_frame_context != REFRESH_FRAME_CONTEXT_BACKWARD ||
          cm->error_resilient_mode);
   assert(!memcmp(cm->counts.y_mode, zero_counts.y_mode,
@@ -2298,13 +2298,13 @@ static void debug_check_frame_counts(const VP10_COMMON *const cm) {
 #endif  // NDEBUG
 
 static struct aom_read_bit_buffer *init_read_bit_buffer(
-    VP10Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
-    const uint8_t *data_end, uint8_t clear_data[MAX_VP10_HEADER_SIZE]) {
+    AV1Decoder *pbi, struct aom_read_bit_buffer *rb, const uint8_t *data,
+    const uint8_t *data_end, uint8_t clear_data[MAX_AV1_HEADER_SIZE]) {
   rb->bit_offset = 0;
   rb->error_handler = error_handler;
   rb->error_handler_data = &pbi->common;
   if (pbi->decrypt_cb) {
-    const int n = (int)VPXMIN(MAX_VP10_HEADER_SIZE, data_end - data);
+    const int n = (int)VPXMIN(MAX_AV1_HEADER_SIZE, data_end - data);
     pbi->decrypt_cb(pbi->decrypt_state, data, clear_data, n);
     rb->bit_buffer = clear_data;
     rb->bit_buffer_end = clear_data + n;
@@ -2317,32 +2317,32 @@ static struct aom_read_bit_buffer *init_read_bit_buffer(
 
 //------------------------------------------------------------------------------
 
-int vp10_read_sync_code(struct aom_read_bit_buffer *const rb) {
-  return aom_rb_read_literal(rb, 8) == VP10_SYNC_CODE_0 &&
-         aom_rb_read_literal(rb, 8) == VP10_SYNC_CODE_1 &&
-         aom_rb_read_literal(rb, 8) == VP10_SYNC_CODE_2;
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb) {
+  return aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_0 &&
+         aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_1 &&
+         aom_rb_read_literal(rb, 8) == AV1_SYNC_CODE_2;
 }
 
-void vp10_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
                           int *height) {
   *width = aom_rb_read_literal(rb, 16) + 1;
   *height = aom_rb_read_literal(rb, 16) + 1;
 }
 
-BITSTREAM_PROFILE vp10_read_profile(struct aom_read_bit_buffer *rb) {
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb) {
   int profile = aom_rb_read_bit(rb);
   profile |= aom_rb_read_bit(rb) << 1;
   if (profile > 2) profile += aom_rb_read_bit(rb);
   return (BITSTREAM_PROFILE)profile;
 }
 
-void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
+void av1_decode_frame(AV1Decoder *pbi, const uint8_t *data,
                        const uint8_t *data_end, const uint8_t **p_data_end) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   MACROBLOCKD *const xd = &pbi->mb;
   struct aom_read_bit_buffer rb;
   int context_updated = 0;
-  uint8_t clear_data[MAX_VP10_HEADER_SIZE];
+  uint8_t clear_data[MAX_AV1_HEADER_SIZE];
   const size_t first_partition_size = read_uncompressed_header(
       pbi, init_read_bit_buffer(pbi, &rb, data, data_end, clear_data));
   const int tile_rows = 1 << cm->log2_tile_rows;
@@ -2366,14 +2366,14 @@ void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
       cm->height == cm->last_height && !cm->last_intra_only &&
       cm->last_show_frame && (cm->last_frame_type != KEY_FRAME);
 
-  vp10_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
+  av1_setup_block_planes(xd, cm->subsampling_x, cm->subsampling_y);
 
   *cm->fc = cm->frame_contexts[cm->frame_context_idx];
   if (!cm->fc->initialized)
     aom_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
                        "Uninitialized entropy context.");
 
-  vp10_zero(cm->counts);
+  av1_zero(cm->counts);
 
   xd->corrupted = 0;
   new_fb->corrupted = read_compressed_header(pbi, data, first_partition_size);
@@ -2382,7 +2382,7 @@ void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
                        "Decode failed. Frame data header is corrupted.");
 
   if (cm->lf.filter_level && !cm->skip_loop_filter) {
-    vp10_loop_filter_frame_init(cm, cm->lf.filter_level);
+    av1_loop_filter_frame_init(cm, cm->lf.filter_level);
   }
 
   // If encoded in frame parallel mode, frame context is ready after decoding
@@ -2395,13 +2395,13 @@ void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
       context_updated = 1;
       cm->frame_contexts[cm->frame_context_idx] = *cm->fc;
     }
-    vp10_frameworker_lock_stats(worker);
+    av1_frameworker_lock_stats(worker);
     pbi->cur_buf->row = -1;
     pbi->cur_buf->col = -1;
     frame_worker_data->frame_context_ready = 1;
     // Signal the main thread that context is ready.
-    vp10_frameworker_signal_stats(worker);
-    vp10_frameworker_unlock_stats(worker);
+    av1_frameworker_signal_stats(worker);
+    av1_frameworker_unlock_stats(worker);
   }
 
   if (pbi->max_threads > 1 && tile_rows == 1 && tile_cols > 1) {
@@ -2411,7 +2411,7 @@ void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
       if (!cm->skip_loop_filter) {
         // If multiple threads are used to decode tiles, then we use those
         // threads to do parallel loopfiltering.
-        vp10_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
+        av1_loop_filter_frame_mt(new_fb, cm, pbi->mb.plane,
                                   cm->lf.filter_level, 0, 0, pbi->tile_workers,
                                   pbi->num_tile_workers, &pbi->lf_row_sync);
       }
@@ -2425,17 +2425,17 @@ void vp10_decode_frame(VP10Decoder *pbi, const uint8_t *data,
 
   if (!xd->corrupted) {
     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-      vp10_adapt_coef_probs(cm);
+      av1_adapt_coef_probs(cm);
 #if CONFIG_MISC_FIXES
-      vp10_adapt_intra_frame_probs(cm);
+      av1_adapt_intra_frame_probs(cm);
 #endif
 
       if (!frame_is_intra_only(cm)) {
 #if !CONFIG_MISC_FIXES
-        vp10_adapt_intra_frame_probs(cm);
+        av1_adapt_intra_frame_probs(cm);
 #endif
-        vp10_adapt_inter_frame_probs(cm);
-        vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+        av1_adapt_inter_frame_probs(cm);
+        av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
       }
     } else {
       debug_check_frame_counts(cm);
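
The hunk above keeps the frame-header parsing shape intact while renaming it: av1_read_profile() assembles the profile from two bits plus an optional third, and av1_read_frame_size() reads each dimension as a 16-bit literal minus one. Below is a minimal sketch of that bit-level layout using a toy MSB-first reader; it is illustrative only and stands in for the real aom_read_bit_buffer API.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy MSB-first bit reader standing in for struct aom_read_bit_buffer.
 * Illustrative sketch, not the libaom API. */
typedef struct {
  const uint8_t *buf;
  size_t bit_pos;
} toy_bit_reader;

static int toy_read_bit(toy_bit_reader *rb) {
  const int bit = (rb->buf[rb->bit_pos >> 3] >> (7 - (rb->bit_pos & 7))) & 1;
  ++rb->bit_pos;
  return bit;
}

static int toy_read_literal(toy_bit_reader *rb, int bits) {
  int value = 0;
  while (bits--) value = (value << 1) | toy_read_bit(rb);
  return value;
}

/* Mirrors the shape of av1_read_profile(): the first bit is the low bit of
 * the profile, the second the high bit, and one more bit is read only when
 * both are set (the reserved-bit pattern this syntax inherits from VP9). */
static int toy_read_profile(toy_bit_reader *rb) {
  int profile = toy_read_bit(rb);
  profile |= toy_read_bit(rb) << 1;
  if (profile > 2) profile += toy_read_bit(rb);
  return profile;
}

int main(void) {
  /* Profile bits "10" (-> profile 1) followed by a 16-bit "width minus 1"
   * field of 639, i.e. a 640-pixel width as read by av1_read_frame_size(). */
  const uint8_t data[] = { 0x80, 0x9f, 0xc0 };
  toy_bit_reader rb = { data, 0 };
  printf("profile = %d\n", toy_read_profile(&rb));
  printf("width   = %d\n", toy_read_literal(&rb, 16) + 1);
  return 0;
}
```
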
diff --git a/av1/decoder/decodeframe.h b/av1/decoder/decodeframe.h
index a3908a134a7bf81cd87e05ec2a527eb8522880a0..f6e35a4572722aace030f2733c4ca3d6bf7ac8df 100644
--- a/av1/decoder/decodeframe.h
+++ b/av1/decoder/decodeframe.h
@@ -9,26 +9,26 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_DECODER_DECODEFRAME_H_
-#define VP10_DECODER_DECODEFRAME_H_
+#ifndef AV1_DECODER_DECODEFRAME_H_
+#define AV1_DECODER_DECODEFRAME_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10Decoder;
+struct AV1Decoder;
 struct aom_read_bit_buffer;
 
-int vp10_read_sync_code(struct aom_read_bit_buffer *const rb);
-void vp10_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
+int av1_read_sync_code(struct aom_read_bit_buffer *const rb);
+void av1_read_frame_size(struct aom_read_bit_buffer *rb, int *width,
                           int *height);
-BITSTREAM_PROFILE vp10_read_profile(struct aom_read_bit_buffer *rb);
+BITSTREAM_PROFILE av1_read_profile(struct aom_read_bit_buffer *rb);
 
-void vp10_decode_frame(struct VP10Decoder *pbi, const uint8_t *data,
+void av1_decode_frame(struct AV1Decoder *pbi, const uint8_t *data,
                        const uint8_t *data_end, const uint8_t **p_data_end);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DECODEFRAME_H_
+#endif  // AV1_DECODER_DECODEFRAME_H_
diff --git a/av1/decoder/decodemv.c b/av1/decoder/decodemv.c
index c4c550b4a7382bdf2a4660e27d823e1d9b3f37cc..870dc5c114f1d33198cecfd280a4e19ad39f2bd8 100644
--- a/av1/decoder/decodemv.c
+++ b/av1/decoder/decodemv.c
@@ -26,10 +26,10 @@
 #include "aom_dsp/aom_dsp_common.h"
 
 static PREDICTION_MODE read_intra_mode(aom_reader *r, const aom_prob *p) {
-  return (PREDICTION_MODE)aom_read_tree(r, vp10_intra_mode_tree, p);
+  return (PREDICTION_MODE)aom_read_tree(r, av1_intra_mode_tree, p);
 }
 
-static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_intra_mode_y(AV1_COMMON *cm, MACROBLOCKD *xd,
                                          aom_reader *r, int size_group) {
   const PREDICTION_MODE y_mode =
       read_intra_mode(r, cm->fc->y_mode_prob[size_group]);
@@ -38,7 +38,7 @@ static PREDICTION_MODE read_intra_mode_y(VP10_COMMON *cm, MACROBLOCKD *xd,
   return y_mode;
 }
 
-static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_intra_mode_uv(AV1_COMMON *cm, MACROBLOCKD *xd,
                                           aom_reader *r,
                                           PREDICTION_MODE y_mode) {
   const PREDICTION_MODE uv_mode =
@@ -48,10 +48,10 @@ static PREDICTION_MODE read_intra_mode_uv(VP10_COMMON *cm, MACROBLOCKD *xd,
   return uv_mode;
 }
 
-static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
+static PREDICTION_MODE read_inter_mode(AV1_COMMON *cm, MACROBLOCKD *xd,
                                        aom_reader *r, int ctx) {
   const int mode =
-      aom_read_tree(r, vp10_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
+      aom_read_tree(r, av1_inter_mode_tree, cm->fc->inter_mode_probs[ctx]);
   FRAME_COUNTS *counts = xd->counts;
   if (counts) ++counts->inter_mode[ctx][mode];
 
@@ -60,10 +60,10 @@ static PREDICTION_MODE read_inter_mode(VP10_COMMON *cm, MACROBLOCKD *xd,
 
 static int read_segment_id(aom_reader *r,
                            const struct segmentation_probs *segp) {
-  return aom_read_tree(r, vp10_segment_tree, segp->tree_probs);
+  return aom_read_tree(r, av1_segment_tree, segp->tree_probs);
 }
 
-static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
+static TX_SIZE read_selected_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd,
                                      TX_SIZE max_tx_size, aom_reader *r) {
   FRAME_COUNTS *counts = xd->counts;
   const int ctx = get_tx_size_context(xd);
@@ -79,7 +79,7 @@ static TX_SIZE read_selected_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd,
   return (TX_SIZE)tx_size;
 }
 
-static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, int allow_select,
+static TX_SIZE read_tx_size(AV1_COMMON *cm, MACROBLOCKD *xd, int allow_select,
                             aom_reader *r) {
   TX_MODE tx_mode = cm->tx_mode;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
@@ -91,7 +91,7 @@ static TX_SIZE read_tx_size(VP10_COMMON *cm, MACROBLOCKD *xd, int allow_select,
     return VPXMIN(max_tx_size, tx_mode_to_biggest_tx_size[tx_mode]);
 }
 
-static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
+static int dec_get_segment_id(const AV1_COMMON *cm, const uint8_t *segment_ids,
                               int mi_offset, int x_mis, int y_mis) {
   int x, y, segment_id = INT_MAX;
 
@@ -104,7 +104,7 @@ static int dec_get_segment_id(const VP10_COMMON *cm, const uint8_t *segment_ids,
   return segment_id;
 }
 
-static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
+static void set_segment_id(AV1_COMMON *cm, int mi_offset, int x_mis, int y_mis,
                            int segment_id) {
   int x, y;
 
@@ -115,7 +115,7 @@ static void set_segment_id(VP10_COMMON *cm, int mi_offset, int x_mis, int y_mis,
       cm->current_frame_seg_map[mi_offset + y * cm->mi_cols + x] = segment_id;
 }
 
-static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_intra_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                  int mi_offset, int x_mis, int y_mis,
                                  aom_reader *r) {
   struct segmentation *const seg = &cm->seg;
@@ -143,7 +143,7 @@ static int read_intra_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   return segment_id;
 }
 
-static void copy_segment_id(const VP10_COMMON *cm,
+static void copy_segment_id(const AV1_COMMON *cm,
                             const uint8_t *last_segment_ids,
                             uint8_t *current_segment_ids, int mi_offset,
                             int x_mis, int y_mis) {
@@ -156,7 +156,7 @@ static void copy_segment_id(const VP10_COMMON *cm,
                            : 0;
 }
 
-static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_inter_segment_id(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                  int mi_row, int mi_col, aom_reader *r) {
   struct segmentation *const seg = &cm->seg;
 #if CONFIG_MISC_FIXES
@@ -189,7 +189,7 @@ static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   }
 
   if (seg->temporal_update) {
-    const int ctx = vp10_get_pred_context_seg_id(xd);
+    const int ctx = av1_get_pred_context_seg_id(xd);
     const aom_prob pred_prob = segp->pred_probs[ctx];
     mbmi->seg_id_predicted = aom_read(r, pred_prob);
 #if CONFIG_MISC_FIXES
@@ -213,12 +213,12 @@ static int read_inter_segment_id(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   return segment_id;
 }
 
-static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
+static int read_skip(AV1_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
                      aom_reader *r) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
     return 1;
   } else {
-    const int ctx = vp10_get_skip_context(xd);
+    const int ctx = av1_get_skip_context(xd);
     const int skip = aom_read(r, cm->fc->skip_probs[ctx]);
     FRAME_COUNTS *counts = xd->counts;
     if (counts) ++counts->skip[ctx][skip];
@@ -226,7 +226,7 @@ static int read_skip(VP10_COMMON *cm, const MACROBLOCKD *xd, int segment_id,
   }
 }
 
-static void read_intra_frame_mode_info(VP10_COMMON *const cm,
+static void read_intra_frame_mode_info(AV1_COMMON *const cm,
                                        MACROBLOCKD *const xd, int mi_row,
                                        int mi_col, aom_reader *r) {
   MODE_INFO *const mi = xd->mi[0];
@@ -280,7 +280,7 @@ static void read_intra_frame_mode_info(VP10_COMMON *const cm,
     FRAME_COUNTS *counts = xd->counts;
     TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
     mbmi->tx_type =
-        aom_read_tree(r, vp10_ext_tx_tree,
+        aom_read_tree(r, av1_ext_tx_tree,
                       cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
     if (counts)
       ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -293,12 +293,12 @@ static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
                              int usehp) {
   int mag, d, fr, hp;
   const int sign = aom_read(r, mvcomp->sign);
-  const int mv_class = aom_read_tree(r, vp10_mv_class_tree, mvcomp->classes);
+  const int mv_class = aom_read_tree(r, av1_mv_class_tree, mvcomp->classes);
   const int class0 = mv_class == MV_CLASS_0;
 
   // Integer part
   if (class0) {
-    d = aom_read_tree(r, vp10_mv_class0_tree, mvcomp->class0);
+    d = aom_read_tree(r, av1_mv_class0_tree, mvcomp->class0);
     mag = 0;
   } else {
     int i;
@@ -310,7 +310,7 @@ static int read_mv_component(aom_reader *r, const nmv_component *mvcomp,
   }
 
   // Fractional part
-  fr = aom_read_tree(r, vp10_mv_fp_tree,
+  fr = aom_read_tree(r, av1_mv_fp_tree,
                      class0 ? mvcomp->class0_fp[d] : mvcomp->fp);
 
   // High precision part (if hp is not used, the default value of the hp is 1)
@@ -325,8 +325,8 @@ static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
                            const nmv_context *ctx, nmv_context_counts *counts,
                            int allow_hp) {
   const MV_JOINT_TYPE joint_type =
-      (MV_JOINT_TYPE)aom_read_tree(r, vp10_mv_joint_tree, ctx->joints);
-  const int use_hp = allow_hp && vp10_use_mv_hp(ref);
+      (MV_JOINT_TYPE)aom_read_tree(r, av1_mv_joint_tree, ctx->joints);
+  const int use_hp = allow_hp && av1_use_mv_hp(ref);
   MV diff = { 0, 0 };
 
   if (mv_joint_vertical(joint_type))
@@ -335,17 +335,17 @@ static INLINE void read_mv(aom_reader *r, MV *mv, const MV *ref,
   if (mv_joint_horizontal(joint_type))
     diff.col = read_mv_component(r, &ctx->comps[1], use_hp);
 
-  vp10_inc_mv(&diff, counts, use_hp);
+  av1_inc_mv(&diff, counts, use_hp);
 
   mv->row = ref->row + diff.row;
   mv->col = ref->col + diff.col;
 }
 
-static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
+static REFERENCE_MODE read_block_reference_mode(AV1_COMMON *cm,
                                                 const MACROBLOCKD *xd,
                                                 aom_reader *r) {
   if (cm->reference_mode == REFERENCE_MODE_SELECT) {
-    const int ctx = vp10_get_reference_mode_context(cm, xd);
+    const int ctx = av1_get_reference_mode_context(cm, xd);
     const REFERENCE_MODE mode =
         (REFERENCE_MODE)aom_read(r, cm->fc->comp_inter_prob[ctx]);
     FRAME_COUNTS *counts = xd->counts;
@@ -357,7 +357,7 @@ static REFERENCE_MODE read_block_reference_mode(VP10_COMMON *cm,
 }
 
 // Read the reference frame
-static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static void read_ref_frames(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                             aom_reader *r, int segment_id,
                             MV_REFERENCE_FRAME ref_frame[2]) {
   FRAME_CONTEXT *const fc = cm->fc;
@@ -372,17 +372,17 @@ static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
     // FIXME(rbultje) I'm pretty sure this breaks segmentation ref frame coding
     if (mode == COMPOUND_REFERENCE) {
       const int idx = cm->ref_frame_sign_bias[cm->comp_fixed_ref];
-      const int ctx = vp10_get_pred_context_comp_ref_p(cm, xd);
+      const int ctx = av1_get_pred_context_comp_ref_p(cm, xd);
       const int bit = aom_read(r, fc->comp_ref_prob[ctx]);
       if (counts) ++counts->comp_ref[ctx][bit];
       ref_frame[idx] = cm->comp_fixed_ref;
       ref_frame[!idx] = cm->comp_var_ref[bit];
     } else if (mode == SINGLE_REFERENCE) {
-      const int ctx0 = vp10_get_pred_context_single_ref_p1(xd);
+      const int ctx0 = av1_get_pred_context_single_ref_p1(xd);
       const int bit0 = aom_read(r, fc->single_ref_prob[ctx0][0]);
       if (counts) ++counts->single_ref[ctx0][0][bit0];
       if (bit0) {
-        const int ctx1 = vp10_get_pred_context_single_ref_p2(xd);
+        const int ctx1 = av1_get_pred_context_single_ref_p2(xd);
         const int bit1 = aom_read(r, fc->single_ref_prob[ctx1][1]);
         if (counts) ++counts->single_ref[ctx1][1][bit1];
         ref_frame[0] = bit1 ? ALTREF_FRAME : GOLDEN_FRAME;
@@ -397,18 +397,18 @@ static void read_ref_frames(VP10_COMMON *const cm, MACROBLOCKD *const xd,
   }
 }
 
-static INLINE INTERP_FILTER read_switchable_interp_filter(VP10_COMMON *const cm,
+static INLINE INTERP_FILTER read_switchable_interp_filter(AV1_COMMON *const cm,
                                                           MACROBLOCKD *const xd,
                                                           aom_reader *r) {
-  const int ctx = vp10_get_pred_context_switchable_interp(xd);
+  const int ctx = av1_get_pred_context_switchable_interp(xd);
   const INTERP_FILTER type = (INTERP_FILTER)aom_read_tree(
-      r, vp10_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
+      r, av1_switchable_interp_tree, cm->fc->switchable_interp_prob[ctx]);
   FRAME_COUNTS *counts = xd->counts;
   if (counts) ++counts->switchable_interp[ctx][type];
   return type;
 }
 
-static void read_intra_block_mode_info(VP10_COMMON *const cm,
+static void read_intra_block_mode_info(AV1_COMMON *const cm,
                                        MACROBLOCKD *const xd, MODE_INFO *mi,
                                        aom_reader *r) {
   MB_MODE_INFO *const mbmi = &mi->mbmi;
@@ -446,7 +446,7 @@ static INLINE int is_mv_valid(const MV *mv) {
          mv->col < MV_UPP;
 }
 
-static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE int assign_mv(AV1_COMMON *cm, MACROBLOCKD *xd,
                             PREDICTION_MODE mode, int_mv mv[2],
                             int_mv ref_mv[2], int_mv nearest_mv[2],
                             int_mv near_mv[2], int is_compound, int allow_hp,
@@ -485,12 +485,12 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
   return ret;
 }
 
-static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
+static int read_is_inter_block(AV1_COMMON *const cm, MACROBLOCKD *const xd,
                                int segment_id, aom_reader *r) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_REF_FRAME)) {
     return get_segdata(&cm->seg, segment_id, SEG_LVL_REF_FRAME) != INTRA_FRAME;
   } else {
-    const int ctx = vp10_get_intra_inter_context(xd);
+    const int ctx = av1_get_intra_inter_context(xd);
     const int is_inter = aom_read(r, cm->fc->intra_inter_prob[ctx]);
     FRAME_COUNTS *counts = xd->counts;
     if (counts) ++counts->intra_inter[ctx][is_inter];
@@ -499,16 +499,16 @@ static int read_is_inter_block(VP10_COMMON *const cm, MACROBLOCKD *const xd,
 }
 
 static void fpm_sync(void *const data, int mi_row) {
-  VP10Decoder *const pbi = (VP10Decoder *)data;
-  vp10_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
+  AV1Decoder *const pbi = (AV1Decoder *)data;
+  av1_frameworker_wait(pbi->frame_worker_owner, pbi->common.prev_frame,
                         mi_row << MI_BLOCK_SIZE_LOG2);
 }
 
-static void read_inter_block_mode_info(VP10Decoder *const pbi,
+static void read_inter_block_mode_info(AV1Decoder *const pbi,
                                        MACROBLOCKD *const xd,
                                        MODE_INFO *const mi, int mi_row,
                                        int mi_col, aom_reader *r) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   const BLOCK_SIZE bsize = mbmi->sb_type;
   const int allow_hp = cm->allow_high_precision_mv;
@@ -525,11 +525,11 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
     RefBuffer *ref_buf = &cm->frame_refs[frame - LAST_FRAME];
 
     xd->block_refs[ref] = ref_buf;
-    if ((!vp10_is_valid_scale(&ref_buf->sf)))
+    if ((!av1_is_valid_scale(&ref_buf->sf)))
       aom_internal_error(xd->error_info, VPX_CODEC_UNSUP_BITSTREAM,
                          "Reference frame has invalid dimensions");
-    vp10_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
-    vp10_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame], mi_row, mi_col,
+    av1_setup_pre_planes(xd, ref, ref_buf->buf, mi_row, mi_col, &ref_buf->sf);
+    av1_find_mv_refs(cm, xd, mi, frame, ref_mvs[frame], mi_row, mi_col,
                       fpm_sync, (void *)pbi, inter_mode_ctx);
   }
 
@@ -548,7 +548,7 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
 
   if (bsize < BLOCK_8X8 || mbmi->mode != ZEROMV) {
     for (ref = 0; ref < 1 + is_compound; ++ref) {
-      vp10_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
+      av1_find_best_ref_mvs(allow_hp, ref_mvs[mbmi->ref_frame[ref]],
                              &nearestmv[ref], &nearmv[ref]);
     }
   }
@@ -572,7 +572,7 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
         if (b_mode == NEARESTMV || b_mode == NEARMV) {
           uint8_t dummy_mode_ctx[MAX_REF_FRAMES];
           for (ref = 0; ref < 1 + is_compound; ++ref)
-            vp10_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
+            av1_append_sub8x8_mvs_for_idx(cm, xd, j, ref, mi_row, mi_col,
                                            &nearest_sub8x8[ref],
                                            &near_sub8x8[ref], dummy_mode_ctx);
         }
@@ -601,10 +601,10 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
   }
 }
 
-static void read_inter_frame_mode_info(VP10Decoder *const pbi,
+static void read_inter_frame_mode_info(AV1Decoder *const pbi,
                                        MACROBLOCKD *const xd, int mi_row,
                                        int mi_col, aom_reader *r) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   MODE_INFO *const mi = xd->mi[0];
   MB_MODE_INFO *const mbmi = &mi->mbmi;
   int inter_block;
@@ -625,13 +625,13 @@ static void read_inter_frame_mode_info(VP10Decoder *const pbi,
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     FRAME_COUNTS *counts = xd->counts;
     if (inter_block) {
-      mbmi->tx_type = aom_read_tree(r, vp10_ext_tx_tree,
+      mbmi->tx_type = aom_read_tree(r, av1_ext_tx_tree,
                                     cm->fc->inter_ext_tx_prob[mbmi->tx_size]);
       if (counts) ++counts->inter_ext_tx[mbmi->tx_size][mbmi->tx_type];
     } else {
       const TX_TYPE tx_type_nom = intra_mode_to_tx_type_context[mbmi->mode];
       mbmi->tx_type =
-          aom_read_tree(r, vp10_ext_tx_tree,
+          aom_read_tree(r, av1_ext_tx_tree,
                         cm->fc->intra_ext_tx_prob[mbmi->tx_size][tx_type_nom]);
       if (counts)
         ++counts->intra_ext_tx[mbmi->tx_size][tx_type_nom][mbmi->tx_type];
@@ -641,9 +641,9 @@ static void read_inter_frame_mode_info(VP10Decoder *const pbi,
   }
 }
 
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
                          int mi_col, aom_reader *r, int x_mis, int y_mis) {
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   MODE_INFO *const mi = xd->mi[0];
   MV_REF *frame_mvs = cm->cur_frame->mvs + mi_row * cm->mi_cols + mi_col;
   int w, h;
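
read_mv_component() above assembles a motion-vector component from a sign flag, a magnitude class, the class's integer offset bits, a fractional (quarter-pel) part, and an optional high-precision (eighth-pel) bit. The sketch below shows only the final composition in 1/8-pel units, following the VP9-style layout this decoder inherits; the CLASS0_SIZE value and the +1 offset are assumptions taken from that layout, not spelled out in this diff.

```c
/* Illustrative only: compose an MV component from its decoded pieces,
 * in 1/8-pel units. The constants are assumptions for the sketch. */
#include <stdio.h>

#define TOY_CLASS0_SIZE 2 /* number of class-0 integer offsets (assumed) */

static int toy_compose_mv_component(int sign, int mv_class, int int_bits,
                                    int fr, int hp) {
  /* Class 0 covers the smallest magnitudes; larger classes start at a
   * power-of-two base offset. */
  const int base = (mv_class == 0) ? 0 : (TOY_CLASS0_SIZE << (mv_class + 2));
  /* int_bits << 3: whole pels, fr << 1: quarter pels, hp: one eighth pel. */
  const int mag = base + ((int_bits << 3) | (fr << 1) | hp) + 1;
  return sign ? -mag : mag;
}

int main(void) {
  /* e.g. class 0, integer offset 1, fractional 2 (= half pel), hp 0:
   * magnitude = (1 << 3) + (2 << 1) + 0 + 1 = 13 eighth-pels. */
  printf("%d\n", toy_compose_mv_component(0, 0, 1, 2, 0));
  return 0;
}
```
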
diff --git a/av1/decoder/decodemv.h b/av1/decoder/decodemv.h
index 22b8eeae0bd230d0dc6f161e056bd6fb6641ad57..6ae3df4b9b0d1330e7b0879bedaa7555a7e382a1 100644
--- a/av1/decoder/decodemv.h
+++ b/av1/decoder/decodemv.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_DECODER_DECODEMV_H_
-#define VP10_DECODER_DECODEMV_H_
+#ifndef AV1_DECODER_DECODEMV_H_
+#define AV1_DECODER_DECODEMV_H_
 
 #include "aom_dsp/bitreader.h"
 
@@ -20,11 +20,11 @@
 extern "C" {
 #endif
 
-void vp10_read_mode_info(VP10Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
+void av1_read_mode_info(AV1Decoder *const pbi, MACROBLOCKD *xd, int mi_row,
                          int mi_col, aom_reader *r, int x_mis, int y_mis);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DECODEMV_H_
+#endif  // AV1_DECODER_DECODEMV_H_
diff --git a/av1/decoder/decoder.c b/av1/decoder/decoder.c
index 97b96e065d9d249f21efe6a66a0bc55c206921dd..80d14bb23ff5a807f8d6d81d8bbf446ae650830c 100644
--- a/av1/decoder/decoder.c
+++ b/av1/decoder/decoder.c
@@ -41,19 +41,19 @@ static void initialize_dec(void) {
     av1_rtcd();
     aom_dsp_rtcd();
     aom_scale_rtcd();
-    vp10_init_intra_predictors();
+    av1_init_intra_predictors();
     init_done = 1;
   }
 }
 
-static void vp10_dec_setup_mi(VP10_COMMON *cm) {
+static void av1_dec_setup_mi(AV1_COMMON *cm) {
   cm->mi = cm->mip + cm->mi_stride + 1;
   cm->mi_grid_visible = cm->mi_grid_base + cm->mi_stride + 1;
   memset(cm->mi_grid_base, 0,
          cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
 }
 
-static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
+static int av1_dec_alloc_mi(AV1_COMMON *cm, int mi_size) {
   cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
   if (!cm->mip) return 1;
   cm->mi_alloc_size = mi_size;
@@ -62,24 +62,24 @@ static int vp10_dec_alloc_mi(VP10_COMMON *cm, int mi_size) {
   return 0;
 }
 
-static void vp10_dec_free_mi(VP10_COMMON *cm) {
+static void av1_dec_free_mi(AV1_COMMON *cm) {
   aom_free(cm->mip);
   cm->mip = NULL;
   aom_free(cm->mi_grid_base);
   cm->mi_grid_base = NULL;
 }
 
-VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
-  VP10Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
-  VP10_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
+AV1Decoder *av1_decoder_create(BufferPool *const pool) {
+  AV1Decoder *volatile const pbi = aom_memalign(32, sizeof(*pbi));
+  AV1_COMMON *volatile const cm = pbi ? &pbi->common : NULL;
 
   if (!cm) return NULL;
 
-  vp10_zero(*pbi);
+  av1_zero(*pbi);
 
   if (setjmp(cm->error.jmp)) {
     cm->error.setjmp = 0;
-    vp10_decoder_remove(pbi);
+    av1_decoder_remove(pbi);
     return NULL;
   }
 
@@ -104,11 +104,11 @@ VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
   cm->bit_depth = VPX_BITS_8;
   cm->dequant_bit_depth = VPX_BITS_8;
 
-  cm->alloc_mi = vp10_dec_alloc_mi;
-  cm->free_mi = vp10_dec_free_mi;
-  cm->setup_mi = vp10_dec_setup_mi;
+  cm->alloc_mi = av1_dec_alloc_mi;
+  cm->free_mi = av1_dec_free_mi;
+  cm->setup_mi = av1_dec_setup_mi;
 
-  vp10_loop_filter_init(cm);
+  av1_loop_filter_init(cm);
 
 #if CONFIG_AOM_QM
   aom_qm_init(cm);
@@ -121,7 +121,7 @@ VP10Decoder *vp10_decoder_create(BufferPool *const pool) {
   return pbi;
 }
 
-void vp10_decoder_remove(VP10Decoder *pbi) {
+void av1_decoder_remove(AV1Decoder *pbi) {
   int i;
 
   if (!pbi) return;
@@ -138,7 +138,7 @@ void vp10_decoder_remove(VP10Decoder *pbi) {
   aom_free(pbi->tile_workers);
 
   if (pbi->num_tile_workers > 0) {
-    vp10_loop_filter_dealloc(&pbi->lf_row_sync);
+    av1_loop_filter_dealloc(&pbi->lf_row_sync);
   }
 
   aom_free(pbi);
@@ -150,15 +150,15 @@ static int equal_dimensions(const YV12_BUFFER_CONFIG *a,
          a->uv_height == b->uv_height && a->uv_width == b->uv_width;
 }
 
-aom_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
+aom_codec_err_t av1_copy_reference_dec(AV1Decoder *pbi,
                                         VPX_REFFRAME ref_frame_flag,
                                         YV12_BUFFER_CONFIG *sd) {
-  VP10_COMMON *cm = &pbi->common;
+  AV1_COMMON *cm = &pbi->common;
 
   /* TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
    * encoder is using the frame buffers for. This is just a stub to keep the
    * aomenc --test-decode functionality working, and will be replaced in a
-   * later commit that adds VP10-specific controls for this functionality.
+   * later commit that adds AV1-specific controls for this functionality.
    */
   if (ref_frame_flag == VPX_LAST_FLAG) {
     const YV12_BUFFER_CONFIG *const cfg = get_ref_frame(cm, 0);
@@ -179,7 +179,7 @@ aom_codec_err_t vp10_copy_reference_dec(VP10Decoder *pbi,
   return cm->error.error_code;
 }
 
-aom_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
                                        VPX_REFFRAME ref_frame_flag,
                                        YV12_BUFFER_CONFIG *sd) {
   RefBuffer *ref_buf = NULL;
@@ -188,7 +188,7 @@ aom_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
   // TODO(jkoleszar): The decoder doesn't have any real knowledge of what the
   // encoder is using the frame buffers for. This is just a stub to keep the
   // aomenc --test-decode functionality working, and will be replaced in a
-  // later commit that adds VP10-specific controls for this functionality.
+  // later commit that adds AV1-specific controls for this functionality.
   if (ref_frame_flag == VPX_LAST_FLAG) {
     ref_buf = &cm->frame_refs[0];
   } else if (ref_frame_flag == VPX_GOLD_FLAG) {
@@ -224,9 +224,9 @@ aom_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
 }
 
 /* If any buffer updating is signaled it should be done here. */
-static void swap_frame_buffers(VP10Decoder *pbi) {
+static void swap_frame_buffers(AV1Decoder *pbi) {
   int ref_index = 0, mask;
-  VP10_COMMON *const cm = &pbi->common;
+  AV1_COMMON *const cm = &pbi->common;
   BufferPool *const pool = cm->buffer_pool;
   RefCntBuffer *const frame_bufs = cm->buffer_pool->frame_bufs;
 
@@ -265,9 +265,9 @@ static void swap_frame_buffers(VP10Decoder *pbi) {
     cm->frame_refs[ref_index].idx = -1;
 }
 
-int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
+int av1_receive_compressed_data(AV1Decoder *pbi, size_t size,
                                  const uint8_t **psource) {
-  VP10_COMMON *volatile const cm = &pbi->common;
+  AV1_COMMON *volatile const cm = &pbi->common;
   BufferPool *volatile const pool = cm->buffer_pool;
   RefCntBuffer *volatile const frame_bufs = cm->buffer_pool->frame_bufs;
   const uint8_t *source = *psource;
@@ -307,13 +307,13 @@ int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
   pbi->hold_ref_buf = 0;
   if (cm->frame_parallel_decode) {
     VPxWorker *const worker = pbi->frame_worker_owner;
-    vp10_frameworker_lock_stats(worker);
+    av1_frameworker_lock_stats(worker);
     frame_bufs[cm->new_fb_idx].frame_worker_owner = worker;
     // Reset decoding progress.
     pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
     pbi->cur_buf->row = -1;
     pbi->cur_buf->col = -1;
-    vp10_frameworker_unlock_stats(worker);
+    av1_frameworker_unlock_stats(worker);
   } else {
     pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
   }
@@ -364,7 +364,7 @@ int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
   }
 
   cm->error.setjmp = 1;
-  vp10_decode_frame(pbi, source, source + size, psource);
+  av1_decode_frame(pbi, source, source + size, psource);
 
   swap_frame_buffers(pbi);
 
@@ -374,7 +374,7 @@ int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
     cm->last_show_frame = cm->show_frame;
     cm->prev_frame = cm->cur_frame;
     if (cm->seg.enabled && !cm->frame_parallel_decode)
-      vp10_swap_current_and_last_seg_map(cm);
+      av1_swap_current_and_last_seg_map(cm);
   }
 
   // Update progress in frame parallel decode.
@@ -383,15 +383,15 @@ int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
     // be accessing this buffer.
     VPxWorker *const worker = pbi->frame_worker_owner;
     FrameWorkerData *const frame_worker_data = worker->data1;
-    vp10_frameworker_lock_stats(worker);
+    av1_frameworker_lock_stats(worker);
 
     if (cm->show_frame) {
       cm->current_video_frame++;
     }
     frame_worker_data->frame_decoded = 1;
     frame_worker_data->frame_context_ready = 1;
-    vp10_frameworker_signal_stats(worker);
-    vp10_frameworker_unlock_stats(worker);
+    av1_frameworker_signal_stats(worker);
+    av1_frameworker_unlock_stats(worker);
   } else {
     cm->last_width = cm->width;
     cm->last_height = cm->height;
@@ -404,8 +404,8 @@ int vp10_receive_compressed_data(VP10Decoder *pbi, size_t size,
   return retcode;
 }
 
-int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
-  VP10_COMMON *const cm = &pbi->common;
+int av1_get_raw_frame(AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
+  AV1_COMMON *const cm = &pbi->common;
   int ret = -1;
 
   if (pbi->ready_for_new_data == 1) return ret;
@@ -423,7 +423,7 @@ int vp10_get_raw_frame(VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd) {
   return ret;
 }
 
-aom_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
                                             uint32_t sizes[8], int *count,
                                             aom_decrypt_cb decrypt_cb,
                                             void *decrypt_state) {
diff --git a/av1/decoder/decoder.h b/av1/decoder/decoder.h
index e2b97386cf6db0aff600481a44764b7982dbc12a..7cf8abb988cfff5c8a7fdc516366740755970f39 100644
--- a/av1/decoder/decoder.h
+++ b/av1/decoder/decoder.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_DECODER_DECODER_H_
-#define VP10_DECODER_DECODER_H_
+#ifndef AV1_DECODER_DECODER_H_
+#define AV1_DECODER_DECODER_H_
 
 #include "./aom_config.h"
 
@@ -29,7 +29,7 @@ extern "C" {
 
 // TODO(hkuang): combine this with TileWorkerData.
 typedef struct TileData {
-  VP10_COMMON *cm;
+  AV1_COMMON *cm;
   aom_reader bit_reader;
   DECLARE_ALIGNED(16, MACROBLOCKD, xd);
   /* dqcoeff are shared by all the planes. So planes must be decoded serially */
@@ -38,7 +38,7 @@ typedef struct TileData {
 } TileData;
 
 typedef struct TileWorkerData {
-  struct VP10Decoder *pbi;
+  struct AV1Decoder *pbi;
   aom_reader bit_reader;
   FRAME_COUNTS counts;
   DECLARE_ALIGNED(16, MACROBLOCKD, xd);
@@ -48,10 +48,10 @@ typedef struct TileWorkerData {
   struct aom_internal_error_info error_info;
 } TileWorkerData;
 
-typedef struct VP10Decoder {
+typedef struct AV1Decoder {
   DECLARE_ALIGNED(16, MACROBLOCKD, mb);
 
-  DECLARE_ALIGNED(16, VP10_COMMON, common);
+  DECLARE_ALIGNED(16, AV1_COMMON, common);
 
   int ready_for_new_data;
 
@@ -71,7 +71,7 @@ typedef struct VP10Decoder {
   TileData *tile_data;
   int total_tiles;
 
-  VP10LfSync lf_row_sync;
+  AV1LfSync lf_row_sync;
 
   aom_decrypt_cb decrypt_cb;
   void *decrypt_state;
@@ -80,18 +80,18 @@ typedef struct VP10Decoder {
   int inv_tile_order;
   int need_resync;   // wait for key/intra-only frame.
   int hold_ref_buf;  // hold the reference buffer.
-} VP10Decoder;
+} AV1Decoder;
 
-int vp10_receive_compressed_data(struct VP10Decoder *pbi, size_t size,
+int av1_receive_compressed_data(struct AV1Decoder *pbi, size_t size,
                                  const uint8_t **dest);
 
-int vp10_get_raw_frame(struct VP10Decoder *pbi, YV12_BUFFER_CONFIG *sd);
+int av1_get_raw_frame(struct AV1Decoder *pbi, YV12_BUFFER_CONFIG *sd);
 
-aom_codec_err_t vp10_copy_reference_dec(struct VP10Decoder *pbi,
+aom_codec_err_t av1_copy_reference_dec(struct AV1Decoder *pbi,
                                         VPX_REFFRAME ref_frame_flag,
                                         YV12_BUFFER_CONFIG *sd);
 
-aom_codec_err_t vp10_set_reference_dec(VP10_COMMON *cm,
+aom_codec_err_t av1_set_reference_dec(AV1_COMMON *cm,
                                        VPX_REFFRAME ref_frame_flag,
                                        YV12_BUFFER_CONFIG *sd);
 
@@ -107,14 +107,14 @@ static INLINE uint8_t read_marker(aom_decrypt_cb decrypt_cb,
 
 // This function is exposed for use in tests, as well as the inlined function
 // "read_marker".
-aom_codec_err_t vp10_parse_superframe_index(const uint8_t *data, size_t data_sz,
+aom_codec_err_t av1_parse_superframe_index(const uint8_t *data, size_t data_sz,
                                             uint32_t sizes[8], int *count,
                                             aom_decrypt_cb decrypt_cb,
                                             void *decrypt_state);
 
-struct VP10Decoder *vp10_decoder_create(BufferPool *const pool);
+struct AV1Decoder *av1_decoder_create(BufferPool *const pool);
 
-void vp10_decoder_remove(struct VP10Decoder *pbi);
+void av1_decoder_remove(struct AV1Decoder *pbi);
 
 static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
                                       BufferPool *const pool) {
@@ -135,4 +135,4 @@ static INLINE void decrease_ref_count(int idx, RefCntBuffer *const frame_bufs,
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DECODER_H_
+#endif  // AV1_DECODER_DECODER_H_
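
av1_parse_superframe_index() (declared above, with read_marker exposed for tests) walks the VP9-style superframe index this code still uses: a marker byte at the end of the buffer carries the frame count and the width of the per-frame size fields. A hedged sketch of that marker arithmetic follows; the byte layout is assumed from the VP9 superframe syntax rather than shown in this diff, and the function is a stand-in, not the libaom implementation.

```c
/* Sketch of VP9-style superframe marker parsing (assumed layout:
 * 0b110xxxxx marker, bits 3..4 = size-field bytes minus 1,
 * bits 0..2 = frame count minus 1). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int toy_parse_superframe_marker(const uint8_t *data, size_t data_sz,
                                       int *frames, size_t *index_sz) {
  if (data_sz == 0) return -1;
  const uint8_t marker = data[data_sz - 1];
  if ((marker & 0xe0) != 0xc0) return -1;      /* not a superframe index */
  const int frame_count = (marker & 0x7) + 1;  /* 1..8 frames */
  const int mag = ((marker >> 3) & 0x3) + 1;   /* 1..4 bytes per size */
  *frames = frame_count;
  /* leading marker byte + per-frame size fields + trailing marker byte */
  *index_sz = 2 + (size_t)mag * frame_count;
  return (*index_sz <= data_sz) ? 0 : -1;
}

int main(void) {
  /* 0xc2: marker pattern, 1-byte sizes, 3 frames; sizes 10, 20, 30. */
  const uint8_t buf[] = { 0xc2, 10, 20, 30, 0xc2 };
  int frames;
  size_t index_sz;
  if (toy_parse_superframe_marker(buf, sizeof(buf), &frames, &index_sz) == 0)
    printf("frames=%d index_sz=%zu\n", frames, index_sz);
  return 0;
}
```
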
diff --git a/av1/decoder/detokenize.c b/av1/decoder/detokenize.c
index b0e083e151efea1a1bd6057e89dd06cc89afdfc2..99ee02d360eb41804d1ff065cf64612896c92bb8 100644
--- a/av1/decoder/detokenize.c
+++ b/av1/decoder/detokenize.c
@@ -86,38 +86,38 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
     eob_branch_count = counts->eob_branch[tx_size][type][ref];
   }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->bd > VPX_BITS_8) {
     if (xd->bd == VPX_BITS_10) {
-      cat1_prob = vp10_cat1_prob_high10;
-      cat2_prob = vp10_cat2_prob_high10;
-      cat3_prob = vp10_cat3_prob_high10;
-      cat4_prob = vp10_cat4_prob_high10;
-      cat5_prob = vp10_cat5_prob_high10;
-      cat6_prob = vp10_cat6_prob_high10;
+      cat1_prob = av1_cat1_prob_high10;
+      cat2_prob = av1_cat2_prob_high10;
+      cat3_prob = av1_cat3_prob_high10;
+      cat4_prob = av1_cat4_prob_high10;
+      cat5_prob = av1_cat5_prob_high10;
+      cat6_prob = av1_cat6_prob_high10;
     } else {
-      cat1_prob = vp10_cat1_prob_high12;
-      cat2_prob = vp10_cat2_prob_high12;
-      cat3_prob = vp10_cat3_prob_high12;
-      cat4_prob = vp10_cat4_prob_high12;
-      cat5_prob = vp10_cat5_prob_high12;
-      cat6_prob = vp10_cat6_prob_high12;
+      cat1_prob = av1_cat1_prob_high12;
+      cat2_prob = av1_cat2_prob_high12;
+      cat3_prob = av1_cat3_prob_high12;
+      cat4_prob = av1_cat4_prob_high12;
+      cat5_prob = av1_cat5_prob_high12;
+      cat6_prob = av1_cat6_prob_high12;
     }
   } else {
-    cat1_prob = vp10_cat1_prob;
-    cat2_prob = vp10_cat2_prob;
-    cat3_prob = vp10_cat3_prob;
-    cat4_prob = vp10_cat4_prob;
-    cat5_prob = vp10_cat5_prob;
-    cat6_prob = vp10_cat6_prob;
+    cat1_prob = av1_cat1_prob;
+    cat2_prob = av1_cat2_prob;
+    cat3_prob = av1_cat3_prob;
+    cat4_prob = av1_cat4_prob;
+    cat5_prob = av1_cat5_prob;
+    cat6_prob = av1_cat6_prob;
   }
 #else
-  cat1_prob = vp10_cat1_prob;
-  cat2_prob = vp10_cat2_prob;
-  cat3_prob = vp10_cat3_prob;
-  cat4_prob = vp10_cat4_prob;
-  cat5_prob = vp10_cat5_prob;
-  cat6_prob = vp10_cat6_prob;
+  cat1_prob = av1_cat1_prob;
+  cat2_prob = av1_cat2_prob;
+  cat3_prob = av1_cat3_prob;
+  cat4_prob = av1_cat4_prob;
+  cat5_prob = av1_cat5_prob;
+  cat6_prob = av1_cat6_prob;
 #endif
 
   while (c < max_eob) {
@@ -147,8 +147,8 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
       val = 1;
     } else {
       INCREMENT_COUNT(TWO_TOKEN);
-      token = aom_read_tree(r, vp10_coef_con_tree,
-                            vp10_pareto8_full[prob[PIVOT_NODE] - 1]);
+      token = aom_read_tree(r, av1_coef_con_tree,
+                            av1_pareto8_full[prob[PIVOT_NODE] - 1]);
       switch (token) {
         case TWO_TOKEN:
         case THREE_TOKEN:
@@ -175,7 +175,7 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
           const int skip_bits = 0;
 #endif
           const uint8_t *cat6p = cat6_prob + skip_bits;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           switch (xd->bd) {
             case VPX_BITS_8:
               val = CAT6_MIN_VAL + read_coeff(cat6p, 14 - skip_bits, r);
@@ -201,15 +201,15 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
 #endif
     v = (val * dqv) >> dq_shift;
 #if CONFIG_COEFFICIENT_RANGE_CHECKING
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     dqcoeff[scan[c]] = highbd_check_range((aom_read_bit(r) ? -v : v), xd->bd);
 #else
     dqcoeff[scan[c]] = check_range(aom_read_bit(r) ? -v : v);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #else
     dqcoeff[scan[c]] = aom_read_bit(r) ? -v : v;
 #endif  // CONFIG_COEFFICIENT_RANGE_CHECKING
-    token_cache[scan[c]] = vp10_pt_energy_class[token];
+    token_cache[scan[c]] = av1_pt_energy_class[token];
     ++c;
     ctx = get_coef_context(nb, token_cache, c);
     dqv = dq[1];
@@ -218,8 +218,8 @@ static int decode_coefs(const MACROBLOCKD *xd, PLANE_TYPE type,
   return c;
 }
 
-// TODO(slavarnway): Decode version of vp10_set_context.  Modify
-// vp10_set_context
+// TODO(slavarnway): Decode version of av1_set_context.  Modify
+// av1_set_context
 // after testing is complete, then delete this version.
 static void dec_set_contexts(const MACROBLOCKD *xd,
                              struct macroblockd_plane *pd, TX_SIZE tx_size,
@@ -258,7 +258,7 @@ static void dec_set_contexts(const MACROBLOCKD *xd,
   }
 }
 
-int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+int av1_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
                              int x, int y, TX_SIZE tx_size, aom_reader *r,
                              int seg_id) {
   struct macroblockd_plane *const pd = &xd->plane[plane];
diff --git a/av1/decoder/detokenize.h b/av1/decoder/detokenize.h
index 1abb5d5af43ca709fbfe9098bb34d7be919e5fdd..2f21334a165a920b773b431e2f1fd662518bcb27 100644
--- a/av1/decoder/detokenize.h
+++ b/av1/decoder/detokenize.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_DECODER_DETOKENIZE_H_
-#define VP10_DECODER_DETOKENIZE_H_
+#ifndef AV1_DECODER_DETOKENIZE_H_
+#define AV1_DECODER_DETOKENIZE_H_
 
 #include "aom_dsp/bitreader.h"
 #include "av1/decoder/decoder.h"
@@ -20,7 +20,7 @@
 extern "C" {
 #endif
 
-int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
+int av1_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
                              int x, int y, TX_SIZE tx_size, aom_reader *r,
                              int seg_id);
 
@@ -28,4 +28,4 @@ int vp10_decode_block_tokens(MACROBLOCKD *xd, int plane, const scan_order *sc,
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DETOKENIZE_H_
+#endif  // AV1_DECODER_DETOKENIZE_H_
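
The detokenizer above reconstructs each coefficient by scaling the decoded token value by the dequantizer and applying a separately coded sign bit (`v = (val * dqv) >> dq_shift;` followed by `aom_read_bit(r) ? -v : v`). The snippet below isolates just that reconstruction step for the 8-bit path, with the sign bit passed in as a plain integer instead of coming from aom_reader.

```c
/* Sketch of the coefficient reconstruction step from decode_coefs():
 * scale the decoded magnitude by the dequantizer, shift, then apply the
 * sign bit. Stand-in types; not the libaom implementation. */
#include <stdint.h>
#include <stdio.h>

typedef int16_t toy_tran_low_t;

static toy_tran_low_t toy_dequant_coeff(int val, int dqv, int dq_shift,
                                        int sign_bit) {
  const int v = (val * dqv) >> dq_shift;
  return (toy_tran_low_t)(sign_bit ? -v : v);
}

int main(void) {
  /* e.g. token value 5, AC dequantizer 48, no shift, negative sign. */
  printf("%d\n", toy_dequant_coeff(5, 48, 0, 1)); /* prints -240 */
  return 0;
}
```
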
diff --git a/av1/decoder/dsubexp.c b/av1/decoder/dsubexp.c
index ac7aed5b981442fe5b243c5bbaeec5121e6e2477..5d941c02a7a824ca5542f1b8a0cafafa4e972e90 100644
--- a/av1/decoder/dsubexp.c
+++ b/av1/decoder/dsubexp.c
@@ -69,7 +69,7 @@ static int decode_term_subexp(aom_reader *r) {
   return decode_uniform(r) + 64;
 }
 
-void vp10_diff_update_prob(aom_reader *r, aom_prob *p) {
+void av1_diff_update_prob(aom_reader *r, aom_prob *p) {
   if (aom_read(r, DIFF_UPDATE_PROB)) {
     const int delp = decode_term_subexp(r);
     *p = (aom_prob)inv_remap_prob(delp, *p);
diff --git a/av1/decoder/dsubexp.h b/av1/decoder/dsubexp.h
index 779f345672409d46bc75230437384a7000841439..acc38ba6be065516d45bad1b393379d255d2bd75 100644
--- a/av1/decoder/dsubexp.h
+++ b/av1/decoder/dsubexp.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_DECODER_DSUBEXP_H_
-#define VP10_DECODER_DSUBEXP_H_
+#ifndef AV1_DECODER_DSUBEXP_H_
+#define AV1_DECODER_DSUBEXP_H_
 
 #include "aom_dsp/bitreader.h"
 
@@ -18,10 +18,10 @@
 extern "C" {
 #endif
 
-void vp10_diff_update_prob(aom_reader *r, aom_prob *p);
+void av1_diff_update_prob(aom_reader *r, aom_prob *p);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DSUBEXP_H_
+#endif  // AV1_DECODER_DSUBEXP_H_
diff --git a/av1/decoder/dthread.c b/av1/decoder/dthread.c
index 6ef92a420a6ef740588be30e061c3be4108d5c81..25f8dca28c75434b3995e17230ae700df6e7c48e 100644
--- a/av1/decoder/dthread.c
+++ b/av1/decoder/dthread.c
@@ -18,7 +18,7 @@
 // #define DEBUG_THREAD
 
 // TODO(hkuang): Clean up all the #ifdef in this file.
-void vp10_frameworker_lock_stats(VPxWorker *const worker) {
+void av1_frameworker_lock_stats(VPxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const worker_data = worker->data1;
   pthread_mutex_lock(&worker_data->stats_mutex);
@@ -27,7 +27,7 @@ void vp10_frameworker_lock_stats(VPxWorker *const worker) {
 #endif
 }
 
-void vp10_frameworker_unlock_stats(VPxWorker *const worker) {
+void av1_frameworker_unlock_stats(VPxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const worker_data = worker->data1;
   pthread_mutex_unlock(&worker_data->stats_mutex);
@@ -36,7 +36,7 @@ void vp10_frameworker_unlock_stats(VPxWorker *const worker) {
 #endif
 }
 
-void vp10_frameworker_signal_stats(VPxWorker *const worker) {
+void av1_frameworker_signal_stats(VPxWorker *const worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const worker_data = worker->data1;
 
@@ -60,7 +60,7 @@ void vp10_frameworker_signal_stats(VPxWorker *const worker) {
 #endif
 
 // TODO(hkuang): Remove worker parameter as it is only used in debug code.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
+void av1_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
                            int row) {
 #if CONFIG_MULTITHREAD
   if (!ref_buf) return;
@@ -77,7 +77,7 @@ void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
     VPxWorker *const ref_worker = ref_buf->frame_worker_owner;
     FrameWorkerData *const ref_worker_data =
         (FrameWorkerData *)ref_worker->data1;
-    const VP10Decoder *const pbi = ref_worker_data->pbi;
+    const AV1Decoder *const pbi = ref_worker_data->pbi;
 
 #ifdef DEBUG_THREAD
     {
@@ -88,7 +88,7 @@ void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
     }
 #endif
 
-    vp10_frameworker_lock_stats(ref_worker);
+    av1_frameworker_lock_stats(ref_worker);
     while (ref_buf->row < row && pbi->cur_buf == ref_buf &&
            ref_buf->buf.corrupted != 1) {
       pthread_cond_wait(&ref_worker_data->stats_cond,
@@ -97,12 +97,12 @@ void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
 
     if (ref_buf->buf.corrupted == 1) {
       FrameWorkerData *const worker_data = (FrameWorkerData *)worker->data1;
-      vp10_frameworker_unlock_stats(ref_worker);
+      av1_frameworker_unlock_stats(ref_worker);
       aom_internal_error(&worker_data->pbi->common.error,
                          VPX_CODEC_CORRUPT_FRAME,
                          "Worker %p failed to decode frame", worker);
     }
-    vp10_frameworker_unlock_stats(ref_worker);
+    av1_frameworker_unlock_stats(ref_worker);
   }
 #else
   (void)worker;
@@ -112,7 +112,7 @@ void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
 #endif  // CONFIG_MULTITHREAD
 }
 
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row) {
 #if CONFIG_MULTITHREAD
   VPxWorker *worker = buf->frame_worker_owner;
 
@@ -124,27 +124,27 @@ void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row) {
   }
 #endif
 
-  vp10_frameworker_lock_stats(worker);
+  av1_frameworker_lock_stats(worker);
   buf->row = row;
-  vp10_frameworker_signal_stats(worker);
-  vp10_frameworker_unlock_stats(worker);
+  av1_frameworker_signal_stats(worker);
+  av1_frameworker_unlock_stats(worker);
 #else
   (void)buf;
   (void)row;
 #endif  // CONFIG_MULTITHREAD
 }
 
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
+void av1_frameworker_copy_context(VPxWorker *const dst_worker,
                                    VPxWorker *const src_worker) {
 #if CONFIG_MULTITHREAD
   FrameWorkerData *const src_worker_data = (FrameWorkerData *)src_worker->data1;
   FrameWorkerData *const dst_worker_data = (FrameWorkerData *)dst_worker->data1;
-  VP10_COMMON *const src_cm = &src_worker_data->pbi->common;
-  VP10_COMMON *const dst_cm = &dst_worker_data->pbi->common;
+  AV1_COMMON *const src_cm = &src_worker_data->pbi->common;
+  AV1_COMMON *const dst_cm = &dst_worker_data->pbi->common;
   int i;
 
   // Wait until source frame's context is ready.
-  vp10_frameworker_lock_stats(src_worker);
+  av1_frameworker_lock_stats(src_worker);
   while (!src_worker_data->frame_context_ready) {
     pthread_cond_wait(&src_worker_data->stats_cond,
                       &src_worker_data->stats_mutex);
@@ -154,10 +154,10 @@ void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
                                    ? src_cm->current_frame_seg_map
                                    : src_cm->last_frame_seg_map;
   dst_worker_data->pbi->need_resync = src_worker_data->pbi->need_resync;
-  vp10_frameworker_unlock_stats(src_worker);
+  av1_frameworker_unlock_stats(src_worker);
 
   dst_cm->bit_depth = src_cm->bit_depth;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   dst_cm->use_highbitdepth = src_cm->use_highbitdepth;
 #endif
   dst_cm->prev_frame =
diff --git a/av1/decoder/dthread.h b/av1/decoder/dthread.h
index a40c0df9afded5f03236e67b56c486f400072dfe..dcaea1568b850ddb6d60558a7671ce8d3125d21b 100644
--- a/av1/decoder/dthread.h
+++ b/av1/decoder/dthread.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_DECODER_DTHREAD_H_
-#define VP10_DECODER_DTHREAD_H_
+#ifndef AV1_DECODER_DTHREAD_H_
+#define AV1_DECODER_DTHREAD_H_
 
 #include "./aom_config.h"
 #include "aom_util/aom_thread.h"
@@ -20,13 +20,13 @@
 extern "C" {
 #endif
 
-struct VP10Common;
-struct VP10Decoder;
+struct AV1Common;
+struct AV1Decoder;
 
 // WorkerData for the FrameWorker thread. It contains all the information of
 // the worker and decode structures for decoding a frame.
 typedef struct FrameWorkerData {
-  struct VP10Decoder *pbi;
+  struct AV1Decoder *pbi;
   const uint8_t *data;
   const uint8_t *data_end;
   size_t data_size;
@@ -49,27 +49,27 @@ typedef struct FrameWorkerData {
   int frame_decoded;        // Finished decoding current frame.
 } FrameWorkerData;
 
-void vp10_frameworker_lock_stats(VPxWorker *const worker);
-void vp10_frameworker_unlock_stats(VPxWorker *const worker);
-void vp10_frameworker_signal_stats(VPxWorker *const worker);
+void av1_frameworker_lock_stats(VPxWorker *const worker);
+void av1_frameworker_unlock_stats(VPxWorker *const worker);
+void av1_frameworker_signal_stats(VPxWorker *const worker);
 
 // Wait until ref_buf has been decoded to row in real pixel unit.
 // Note: worker may already finish decoding ref_buf and release it in order to
 // start decoding next frame. So need to check whether worker is still decoding
 // ref_buf.
-void vp10_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
+void av1_frameworker_wait(VPxWorker *const worker, RefCntBuffer *const ref_buf,
                            int row);
 
 // FrameWorker broadcasts its decoding progress so other workers that are
 // waiting on it can resume decoding.
-void vp10_frameworker_broadcast(RefCntBuffer *const buf, int row);
+void av1_frameworker_broadcast(RefCntBuffer *const buf, int row);
 
 // Copy necessary decoding context from src worker to dst worker.
-void vp10_frameworker_copy_context(VPxWorker *const dst_worker,
+void av1_frameworker_copy_context(VPxWorker *const dst_worker,
                                    VPxWorker *const src_worker);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_DECODER_DTHREAD_H_
+#endif  // AV1_DECODER_DTHREAD_H_
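
dthread.c above protects per-frame decoding progress (`ref_buf->row`) with a mutex/condition pair: av1_frameworker_wait() blocks until the owning worker has decoded past a given row, and av1_frameworker_broadcast() publishes new progress and wakes waiters. Below is a condensed sketch of that pattern in plain pthreads; the struct is a stand-in for RefCntBuffer/FrameWorkerData, and the extra checks the real code makes (buffer still owned, not corrupted) are omitted.

```c
/* Condensed sketch of the row-progress synchronization used by
 * av1_frameworker_wait()/av1_frameworker_broadcast(): a decoder thread
 * publishes the last decoded row, consumers block until it is reached. */
#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t stats_mutex;
  pthread_cond_t stats_cond;
  int row; /* last fully decoded row, -1 = nothing yet */
} toy_frame;

static void toy_wait_for_row(toy_frame *f, int row) {
  pthread_mutex_lock(&f->stats_mutex);
  while (f->row < row) pthread_cond_wait(&f->stats_cond, &f->stats_mutex);
  pthread_mutex_unlock(&f->stats_mutex);
}

static void toy_broadcast_row(toy_frame *f, int row) {
  pthread_mutex_lock(&f->stats_mutex);
  f->row = row;
  pthread_cond_broadcast(&f->stats_cond); /* wake every waiting worker */
  pthread_mutex_unlock(&f->stats_mutex);
}

static void *toy_decoder(void *arg) {
  toy_frame *f = (toy_frame *)arg;
  for (int row = 0; row < 4; ++row) toy_broadcast_row(f, row);
  return NULL;
}

int main(void) {
  toy_frame f = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, -1 };
  pthread_t t;
  pthread_create(&t, NULL, toy_decoder, &f);
  toy_wait_for_row(&f, 3); /* block until row 3 has been published */
  printf("row %d reached\n", f.row);
  pthread_join(&t, NULL);
  return 0;
}
```
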
diff --git a/av1/encoder/aq_complexity.c b/av1/encoder/aq_complexity.c
index 0f632b87ba5cf5185013cddc02adfaebbfd3f0de..2fc004e1315f35056e58eb8cb2a0005fc2bf315b 100644
--- a/av1/encoder/aq_complexity.c
+++ b/av1/encoder/aq_complexity.c
@@ -43,12 +43,12 @@ static const double aq_c_var_thresholds[AQ_C_STRENGTHS][AQ_C_SEGMENTS] = {
 
 static int get_aq_c_strength(int q_index, aom_bit_depth_t bit_depth) {
   // Approximate base quantizer (truncated to int)
-  const int base_quant = vp10_ac_quant(q_index, 0, bit_depth) / 4;
+  const int base_quant = av1_ac_quant(q_index, 0, bit_depth) / 4;
   return (base_quant > 10) + (base_quant > 25);
 }
 
-void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_setup_in_frame_q_adj(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   struct segmentation *const seg = &cm->seg;
 
   // Make SURE use of floating point in this function is safe.
@@ -63,22 +63,22 @@ void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
     // Clear down the segment map.
     memset(cpi->segmentation_map, DEFAULT_AQ2_SEG, cm->mi_rows * cm->mi_cols);
 
-    vp10_clearall_segfeatures(seg);
+    av1_clearall_segfeatures(seg);
 
     // Segmentation only makes sense if the target bits per SB is above a
     // threshold. Below this the overheads will usually outweigh any benefit.
     if (cpi->rc.sb64_target_rate < 256) {
-      vp10_disable_segmentation(seg);
+      av1_disable_segmentation(seg);
       return;
     }
 
-    vp10_enable_segmentation(seg);
+    av1_enable_segmentation(seg);
 
     // Select delta coding method.
     seg->abs_delta = SEGMENT_DELTADATA;
 
     // Default segment "Q" feature is disabled so it defaults to the baseline Q.
-    vp10_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
+    av1_disable_segfeature(seg, DEFAULT_AQ2_SEG, SEG_LVL_ALT_Q);
 
     // Use some of the segments for in frame Q adjustment.
     for (segment = 0; segment < AQ_C_SEGMENTS; ++segment) {
@@ -86,7 +86,7 @@ void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
 
       if (segment == DEFAULT_AQ2_SEG) continue;
 
-      qindex_delta = vp10_compute_qdelta_by_rate(
+      qindex_delta = av1_compute_qdelta_by_rate(
           &cpi->rc, cm->frame_type, cm->base_qindex,
           aq_c_q_adj_factor[aq_strength][segment], cm->bit_depth);
 
@@ -98,8 +98,8 @@ void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
         qindex_delta = -cm->base_qindex + 1;
       }
       if ((cm->base_qindex + qindex_delta) > 0) {
-        vp10_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
-        vp10_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
+        av1_enable_segfeature(seg, segment, SEG_LVL_ALT_Q);
+        av1_set_segdata(seg, segment, SEG_LVL_ALT_Q, qindex_delta);
       }
     }
   }
@@ -111,9 +111,9 @@ void vp10_setup_in_frame_q_adj(VP10_COMP *cpi) {
 // Select a segment for the current block.
 // The choice of segment for a block depends on the ratio of the projected
 // bits for the block vs a target average and its spatial complexity.
-void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
+void av1_caq_select_segment(AV1_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
                              int mi_row, int mi_col, int projected_rate) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   const int mi_offset = mi_row * cm->mi_cols + mi_col;
   const int bw = num_8x8_blocks_wide_lookup[BLOCK_64X64];
@@ -140,8 +140,8 @@ void vp10_caq_select_segment(VP10_COMP *cpi, MACROBLOCK *mb, BLOCK_SIZE bs,
                                                     MIN_DEFAULT_LV_THRESH)
                                            : DEFAULT_LV_THRESH;
 
-    vp10_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
-    logvar = vp10_log_block_var(cpi, mb, bs);
+    av1_setup_src_planes(mb, cpi->Source, mi_row, mi_col);
+    logvar = av1_log_block_var(cpi, mb, bs);
 
     segment = AQ_C_SEGMENTS - 1;  // Just in case no break out below.
     for (i = 0; i < AQ_C_SEGMENTS; ++i) {
diff --git a/av1/encoder/aq_complexity.h b/av1/encoder/aq_complexity.h
index 7ba22c8c2ef689f53e54eec1d9bab699a4892b93..05658d46b995ee20126a354d00d914b31b1b6ef7 100644
--- a/av1/encoder/aq_complexity.h
+++ b/av1/encoder/aq_complexity.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_AQ_COMPLEXITY_H_
-#define VP10_ENCODER_AQ_COMPLEXITY_H_
+#ifndef AV1_ENCODER_AQ_COMPLEXITY_H_
+#define AV1_ENCODER_AQ_COMPLEXITY_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -18,20 +18,20 @@ extern "C" {
 
 #include "av1/common/enums.h"
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct macroblock;
 
 // Select a segment for the current Block.
-void vp10_caq_select_segment(struct VP10_COMP *cpi, struct macroblock *,
+void av1_caq_select_segment(struct AV1_COMP *cpi, struct macroblock *,
                              BLOCK_SIZE bs, int mi_row, int mi_col,
                              int projected_rate);
 
 // This function sets up a set of segments with delta Q values around
 // the baseline frame quantizer.
-void vp10_setup_in_frame_q_adj(struct VP10_COMP *cpi);
+void av1_setup_in_frame_q_adj(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_AQ_COMPLEXITY_H_
+#endif  // AV1_ENCODER_AQ_COMPLEXITY_H_
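
av1_setup_in_frame_q_adj() above assigns each complexity-AQ segment a delta from the base quantizer index and clamps it so the effective segment qindex never drops to zero before enabling SEG_LVL_ALT_Q. The sketch below shows only that clamping step; the raw deltas are invented inputs standing in for av1_compute_qdelta_by_rate(), and the active-map conditions in the real code are omitted.

```c
/* Sketch of the per-segment delta-q clamp from av1_setup_in_frame_q_adj():
 * keep base_qindex + delta at 1 or above so ALT_Q can be enabled safely. */
#include <stdio.h>

static int toy_clamp_segment_qdelta(int base_qindex, int raw_delta) {
  if (raw_delta < 0 && raw_delta <= -base_qindex) raw_delta = -base_qindex + 1;
  return raw_delta;
}

int main(void) {
  const int base_qindex = 40;
  const int raw_deltas[] = { -12, -60, 25 }; /* hypothetical model output */
  for (int i = 0; i < 3; ++i) {
    const int d = toy_clamp_segment_qdelta(base_qindex, raw_deltas[i]);
    /* After clamping, base_qindex + d is always >= 1, so the segment's
     * SEG_LVL_ALT_Q feature can be enabled with this delta. */
    printf("segment %d: raw %d -> clamped %d -> qindex %d\n", i,
           raw_deltas[i], d, base_qindex + d);
  }
  return 0;
}
```
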
diff --git a/av1/encoder/aq_cyclicrefresh.c b/av1/encoder/aq_cyclicrefresh.c
index 3d48c148802f3096c47b62fff7a7d9fc78d08101..7f7a5b37d0f0ddafd5375dfea1d303b2a46a28d1 100644
--- a/av1/encoder/aq_cyclicrefresh.c
+++ b/av1/encoder/aq_cyclicrefresh.c
@@ -57,7 +57,7 @@ struct CYCLIC_REFRESH {
   int qindex_delta[3];
 };
 
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
   size_t last_coded_q_map_size;
   CYCLIC_REFRESH *const cr = aom_calloc(1, sizeof(*cr));
   if (cr == NULL) return NULL;
@@ -79,14 +79,14 @@ CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols) {
   return cr;
 }
 
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr) {
   aom_free(cr->map);
   aom_free(cr->last_coded_q_map);
   aom_free(cr);
 }
 
 // Check if we should turn off cyclic refresh based on bitrate condition.
-static int apply_cyclic_refresh_bitrate(const VP10_COMMON *cm,
+static int apply_cyclic_refresh_bitrate(const AV1_COMMON *cm,
                                         const RATE_CONTROL *rc) {
   // Turn off cyclic refresh if bits available per frame is not sufficiently
   // larger than bit cost of segmentation. Segment map bit cost should scale
@@ -134,10 +134,10 @@ static int candidate_refresh_aq(const CYCLIC_REFRESH *cr,
 }
 
 // Compute delta-q for the segment.
-static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
+static int compute_deltaq(const AV1_COMP *cpi, int q, double rate_factor) {
   const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   const RATE_CONTROL *const rc = &cpi->rc;
-  int deltaq = vp10_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
+  int deltaq = av1_compute_qdelta_by_rate(rc, cpi->common.frame_type, q,
                                            rate_factor, cpi->common.bit_depth);
   if ((-deltaq) > cr->max_qdelta_perc * q / 100) {
     deltaq = -cr->max_qdelta_perc * q / 100;
@@ -149,9 +149,9 @@ static int compute_deltaq(const VP10_COMP *cpi, int q, double rate_factor) {
 // from non-base segment. For now ignore effect of multiple segments
 // (with different delta-q). Note this function is called in the postencode
 // (called from rc_update_rate_correction_factors()).
-int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
+int av1_cyclic_refresh_estimate_bits_at_q(const AV1_COMP *cpi,
                                            double correction_factor) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int estimated_bits;
   int mbs = cm->MBs;
@@ -163,14 +163,14 @@ int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
   // Take segment weighted average for estimated bits.
   estimated_bits =
       (int)((1.0 - weight_segment1 - weight_segment2) *
-                vp10_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
+                av1_estimate_bits_at_q(cm->frame_type, cm->base_qindex, mbs,
                                         correction_factor, cm->bit_depth) +
             weight_segment1 *
-                vp10_estimate_bits_at_q(cm->frame_type,
+                av1_estimate_bits_at_q(cm->frame_type,
                                         cm->base_qindex + cr->qindex_delta[1],
                                         mbs, correction_factor, cm->bit_depth) +
             weight_segment2 *
-                vp10_estimate_bits_at_q(cm->frame_type,
+                av1_estimate_bits_at_q(cm->frame_type,
                                         cm->base_qindex + cr->qindex_delta[2],
                                         mbs, correction_factor, cm->bit_depth));
   return estimated_bits;
@@ -181,9 +181,9 @@ int vp10_cyclic_refresh_estimate_bits_at_q(const VP10_COMP *cpi,
 // rc_regulate_q() to set the base qp index.
 // Note: the segment map is set to either 0/CR_SEGMENT_ID_BASE (no refresh) or
 // to 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock, prior to encoding.
-int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
+int av1_cyclic_refresh_rc_bits_per_mb(const AV1_COMP *cpi, int i,
                                        double correction_factor) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int bits_per_mb;
   int num8x8bl = cm->MBs << 2;
@@ -198,10 +198,10 @@ int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
   int deltaq = compute_deltaq(cpi, i, cr->rate_ratio_qdelta);
   // Take segment weighted average for bits per mb.
   bits_per_mb =
-      (int)((1.0 - weight_segment) * vp10_rc_bits_per_mb(cm->frame_type, i,
+      (int)((1.0 - weight_segment) * av1_rc_bits_per_mb(cm->frame_type, i,
                                                          correction_factor,
                                                          cm->bit_depth) +
-            weight_segment * vp10_rc_bits_per_mb(cm->frame_type, i + deltaq,
+            weight_segment * av1_rc_bits_per_mb(cm->frame_type, i + deltaq,
                                                  correction_factor,
                                                  cm->bit_depth));
   return bits_per_mb;
@@ -210,11 +210,11 @@ int vp10_cyclic_refresh_rc_bits_per_mb(const VP10_COMP *cpi, int i,
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
-void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(AV1_COMP *const cpi,
                                         MB_MODE_INFO *const mbmi, int mi_row,
                                         int mi_col, BLOCK_SIZE bsize,
                                         int64_t rate, int64_t dist, int skip) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   const int bw = num_8x8_blocks_wide_lookup[bsize];
   const int bh = num_8x8_blocks_high_lookup[bsize];
@@ -278,8 +278,8 @@ void vp10_cyclic_refresh_update_segment(VP10_COMP *const cpi,
 }
 
 // Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_postencode(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   unsigned char *const seg_map = cpi->segmentation_map;
   int mi_row, mi_col;
@@ -298,7 +298,7 @@ void vp10_cyclic_refresh_postencode(VP10_COMP *const cpi) {
 }
 
 // Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_set_golden_update(AV1_COMP *const cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   // Set minimum gf_interval for GF update to a multiple (== 2) of refresh
@@ -314,8 +314,8 @@ void vp10_cyclic_refresh_set_golden_update(VP10_COMP *const cpi) {
 // background has high motion, refresh the golden frame. Otherwise, if the
 // golden reference is to be updated check if we should NOT update the golden
 // ref.
-void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_check_golden_update(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   int mi_row, mi_col;
   double fraction_low = 0.0;
@@ -356,7 +356,7 @@ void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
   // the resolution (resize_pending != 0).
   if (cpi->resize_pending != 0 ||
       (cnt1 * 10 > (70 * rows * cols) && cnt2 * 20 < cnt1)) {
-    vp10_cyclic_refresh_set_golden_update(cpi);
+    av1_cyclic_refresh_set_golden_update(cpi);
     rc->frames_till_gf_update_due = rc->baseline_gf_interval;
 
     if (rc->frames_till_gf_update_due > rc->frames_to_key)
@@ -385,8 +385,8 @@ void vp10_cyclic_refresh_check_golden_update(VP10_COMP *const cpi) {
 // 1/CR_SEGMENT_ID_BOOST1 (refresh) for each superblock.
 // Blocks labeled as BOOST1 may later get set to BOOST2 (during the
 // encoding of the superblock).
-static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void cyclic_refresh_update_map(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   unsigned char *const seg_map = cpi->segmentation_map;
   int i, block_count, bl_index, sb_rows, sb_cols, sbs_in_frame;
@@ -412,7 +412,7 @@ static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
     int mi_col = sb_col_index * MI_BLOCK_SIZE;
     int qindex_thresh =
         cpi->oxcf.content == VPX_CONTENT_SCREEN
-            ? vp10_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
+            ? av1_get_qindex(&cm->seg, CR_SEGMENT_ID_BOOST2, cm->base_qindex)
             : 0;
     assert(mi_row >= 0 && mi_row < cm->mi_rows);
     assert(mi_col >= 0 && mi_col < cm->mi_cols);
@@ -453,9 +453,9 @@ static void cyclic_refresh_update_map(VP10_COMP *const cpi) {
 }
 
 // Set cyclic refresh parameters.
-void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
+void av1_cyclic_refresh_update_parameters(AV1_COMP *const cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   cr->percent_refresh = 10;
   cr->max_qdelta_perc = 50;
@@ -477,8 +477,8 @@ void vp10_cyclic_refresh_update_parameters(VP10_COMP *const cpi) {
 }
 
 // Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_setup(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   struct segmentation *const seg = &cm->seg;
@@ -489,7 +489,7 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
     // Set segmentation map to 0 and disable.
     unsigned char *const seg_map = cpi->segmentation_map;
     memset(seg_map, 0, cm->mi_rows * cm->mi_cols);
-    vp10_disable_segmentation(&cm->seg);
+    av1_disable_segmentation(&cm->seg);
     if (cm->frame_type == KEY_FRAME) {
       memset(cr->last_coded_q_map, MAXQ,
              cm->mi_rows * cm->mi_cols * sizeof(*cr->last_coded_q_map));
@@ -499,37 +499,37 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
   } else {
     int qindex_delta = 0;
     int qindex2;
-    const double q = vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
+    const double q = av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth);
     aom_clear_system_state();
     // Set rate threshold to some multiple (set to 2 for now) of the target
     // rate (target is given by sb64_target_rate and scaled by 256).
     cr->thresh_rate_sb = ((int64_t)(rc->sb64_target_rate) << 8) << 2;
     // Distortion threshold, quadratic in Q, scale factor to be adjusted.
     // q will not exceed 457, so (q * q) is within 32bit; see:
-    // vp10_convert_qindex_to_q(), vp10_ac_quant(), ac_qlookup*[].
+    // av1_convert_qindex_to_q(), av1_ac_quant(), ac_qlookup*[].
     cr->thresh_dist_sb = ((int64_t)(q * q)) << 2;
 
     // Set up segmentation.
     // Clear down the segment map.
-    vp10_enable_segmentation(&cm->seg);
-    vp10_clearall_segfeatures(seg);
+    av1_enable_segmentation(&cm->seg);
+    av1_clearall_segfeatures(seg);
     // Select delta coding method.
     seg->abs_delta = SEGMENT_DELTADATA;
 
     // Note: setting temporal_update has no effect, as the seg-map coding method
     // (temporal or spatial) is determined in
-    // vp10_choose_segmap_coding_method(),
+    // av1_choose_segmap_coding_method(),
     // based on the coding cost of each method. For error_resilient mode on the
     // last_frame_seg_map is set to 0, so if temporal coding is used, it is
     // relative to 0 previous map.
     // seg->temporal_update = 0;
 
     // Segment BASE "Q" feature is disabled so it defaults to the baseline Q.
-    vp10_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
+    av1_disable_segfeature(seg, CR_SEGMENT_ID_BASE, SEG_LVL_ALT_Q);
     // Use segment BOOST1 for in-frame Q adjustment.
-    vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
+    av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q);
     // Use segment BOOST2 for more aggressive in-frame Q adjustment.
-    vp10_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
+    av1_enable_segfeature(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q);
 
     // Set the q delta for segment BOOST1.
     qindex_delta = compute_deltaq(cpi, cm->base_qindex, cr->rate_ratio_qdelta);
@@ -538,9 +538,9 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
     // Compute rd-mult for segment BOOST1.
     qindex2 = clamp(cm->base_qindex + cm->y_dc_delta_q + qindex_delta, 0, MAXQ);
 
-    cr->rdmult = vp10_compute_rd_mult(cpi, qindex2);
+    cr->rdmult = av1_compute_rd_mult(cpi, qindex2);
 
-    vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
+    av1_set_segdata(seg, CR_SEGMENT_ID_BOOST1, SEG_LVL_ALT_Q, qindex_delta);
 
     // Set a more aggressive (higher) q delta for segment BOOST2.
     qindex_delta = compute_deltaq(
@@ -548,19 +548,19 @@ void vp10_cyclic_refresh_setup(VP10_COMP *const cpi) {
         VPXMIN(CR_MAX_RATE_TARGET_RATIO,
                0.1 * cr->rate_boost_fac * cr->rate_ratio_qdelta));
     cr->qindex_delta[2] = qindex_delta;
-    vp10_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
+    av1_set_segdata(seg, CR_SEGMENT_ID_BOOST2, SEG_LVL_ALT_Q, qindex_delta);
 
     // Update the segmentation and refresh map.
     cyclic_refresh_update_map(cpi);
   }
 }
 
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr) {
   return cr->rdmult;
 }
 
-void vp10_cyclic_refresh_reset_resize(VP10_COMP *const cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_cyclic_refresh_reset_resize(AV1_COMP *const cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   CYCLIC_REFRESH *const cr = cpi->cyclic_refresh;
   memset(cr->map, 0, cm->mi_rows * cm->mi_cols);
   cr->sb_index = 0;
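
The rate-control arithmetic touched by this rename is small enough to sketch in isolation. The snippet below is illustrative only (standalone names, not the encoder's real helpers): it shows the clamp applied in compute_deltaq() and the segment-weighted bit estimate used by av1_cyclic_refresh_estimate_bits_at_q().

/* Illustrative sketch, not part of the patch: clamp a (negative) delta-q to a
 * percentage of the running q, as compute_deltaq() does above. */
static int clamp_deltaq_sketch(int deltaq, int q, int max_qdelta_perc) {
  if (-deltaq > max_qdelta_perc * q / 100) deltaq = -max_qdelta_perc * q / 100;
  return deltaq;
}

/* Illustrative sketch of the segment-weighted bit estimate: w1/w2 are the
 * fractions of the frame covered by the two boosted segments, and bits_at_q()
 * stands in for the encoder's rate model (av1_estimate_bits_at_q()). */
static int weighted_bits_sketch(double w1, double w2, int base_q, int dq1,
                                int dq2, int (*bits_at_q)(int q)) {
  return (int)((1.0 - w1 - w2) * bits_at_q(base_q) +
               w1 * bits_at_q(base_q + dq1) + w2 * bits_at_q(base_q + dq2));
}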
diff --git a/av1/encoder/aq_cyclicrefresh.h b/av1/encoder/aq_cyclicrefresh.h
index dcdc039e761eae2d8975cf229430e4b0341eb57a..327fe149d3c6af804fafc79ce73a7ac7e4391c1d 100644
--- a/av1/encoder/aq_cyclicrefresh.h
+++ b/av1/encoder/aq_cyclicrefresh.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_AQ_CYCLICREFRESH_H_
-#define VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#ifndef AV1_ENCODER_AQ_CYCLICREFRESH_H_
+#define AV1_ENCODER_AQ_CYCLICREFRESH_H_
 
 #include "av1/common/blockd.h"
 
@@ -27,55 +27,55 @@ extern "C" {
 // Maximum rate target ratio for setting segment delta-qp.
 #define CR_MAX_RATE_TARGET_RATIO 4.0
 
-struct VP10_COMP;
+struct AV1_COMP;
 
 struct CYCLIC_REFRESH;
 typedef struct CYCLIC_REFRESH CYCLIC_REFRESH;
 
-CYCLIC_REFRESH *vp10_cyclic_refresh_alloc(int mi_rows, int mi_cols);
+CYCLIC_REFRESH *av1_cyclic_refresh_alloc(int mi_rows, int mi_cols);
 
-void vp10_cyclic_refresh_free(CYCLIC_REFRESH *cr);
+void av1_cyclic_refresh_free(CYCLIC_REFRESH *cr);
 
 // Estimate the bits, incorporating the delta-q from segment 1, after encoding
 // the frame.
-int vp10_cyclic_refresh_estimate_bits_at_q(const struct VP10_COMP *cpi,
+int av1_cyclic_refresh_estimate_bits_at_q(const struct AV1_COMP *cpi,
                                            double correction_factor);
 
 // Estimate the bits per mb, for a given q = i and a corresponding delta-q
 // (for segment 1), prior to encoding the frame.
-int vp10_cyclic_refresh_rc_bits_per_mb(const struct VP10_COMP *cpi, int i,
+int av1_cyclic_refresh_rc_bits_per_mb(const struct AV1_COMP *cpi, int i,
                                        double correction_factor);
 
 // Prior to coding a given prediction block, of size bsize at (mi_row, mi_col),
 // check if we should reset the segment_id, and update the cyclic_refresh map
 // and segmentation map.
-void vp10_cyclic_refresh_update_segment(struct VP10_COMP *const cpi,
+void av1_cyclic_refresh_update_segment(struct AV1_COMP *const cpi,
                                         MB_MODE_INFO *const mbmi, int mi_row,
                                         int mi_col, BLOCK_SIZE bsize,
                                         int64_t rate, int64_t dist, int skip);
 
 // Update the segmentation map, and related quantities: cyclic refresh map,
 // refresh sb_index, and target number of blocks to be refreshed.
-void vp10_cyclic_refresh_update__map(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update__map(struct AV1_COMP *const cpi);
 
 // Update the actual number of blocks that were applied the segment delta q.
-void vp10_cyclic_refresh_postencode(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_postencode(struct AV1_COMP *const cpi);
 
 // Set golden frame update interval, for 1 pass CBR mode.
-void vp10_cyclic_refresh_set_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_set_golden_update(struct AV1_COMP *const cpi);
 
 // Check if we should not update golden reference, based on past refresh stats.
-void vp10_cyclic_refresh_check_golden_update(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_check_golden_update(struct AV1_COMP *const cpi);
 
 // Set/update global/frame level refresh parameters.
-void vp10_cyclic_refresh_update_parameters(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_update_parameters(struct AV1_COMP *const cpi);
 
 // Setup cyclic background refresh: set delta q and segmentation map.
-void vp10_cyclic_refresh_setup(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_setup(struct AV1_COMP *const cpi);
 
-int vp10_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
+int av1_cyclic_refresh_get_rdmult(const CYCLIC_REFRESH *cr);
 
-void vp10_cyclic_refresh_reset_resize(struct VP10_COMP *const cpi);
+void av1_cyclic_refresh_reset_resize(struct AV1_COMP *const cpi);
 
 static INLINE int cyclic_refresh_segment_id_boosted(int segment_id) {
   return segment_id == CR_SEGMENT_ID_BOOST1 ||
@@ -95,4 +95,4 @@ static INLINE int cyclic_refresh_segment_id(int segment_id) {
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_AQ_CYCLICREFRESH_H_
+#endif  // AV1_ENCODER_AQ_CYCLICREFRESH_H_
diff --git a/av1/encoder/aq_variance.c b/av1/encoder/aq_variance.c
index 37e53f63d9a09df360ff734eae12ae2d641112d3..b2eb17a9318c087ac6f54ab3bd4d293de328d8e7 100644
--- a/av1/encoder/aq_variance.c
+++ b/av1/encoder/aq_variance.c
@@ -33,26 +33,26 @@ static const int segment_id[ENERGY_SPAN] = { 0, 1, 1, 2, 3, 4 };
 
 #define SEGMENT_ID(i) segment_id[(i)-ENERGY_MIN]
 
-DECLARE_ALIGNED(16, static const uint8_t, vp10_64_zeros[64]) = { 0 };
-#if CONFIG_VPX_HIGHBITDEPTH
-DECLARE_ALIGNED(16, static const uint16_t, vp10_highbd_64_zeros[64]) = { 0 };
+DECLARE_ALIGNED(16, static const uint8_t, av1_64_zeros[64]) = { 0 };
+#if CONFIG_AOM_HIGHBITDEPTH
+DECLARE_ALIGNED(16, static const uint16_t, av1_highbd_64_zeros[64]) = { 0 };
 #endif
 
-unsigned int vp10_vaq_segment_id(int energy) {
+unsigned int av1_vaq_segment_id(int energy) {
   ENERGY_IN_BOUNDS(energy);
   return SEGMENT_ID(energy);
 }
 
-void vp10_vaq_frame_setup(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+void av1_vaq_frame_setup(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   struct segmentation *seg = &cm->seg;
   int i;
 
   if (frame_is_intra_only(cm) || cm->error_resilient_mode ||
       cpi->refresh_alt_ref_frame ||
       (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
-    vp10_enable_segmentation(seg);
-    vp10_clearall_segfeatures(seg);
+    av1_enable_segmentation(seg);
+    av1_clearall_segfeatures(seg);
 
     seg->abs_delta = SEGMENT_DELTADATA;
 
@@ -60,7 +60,7 @@ void vp10_vaq_frame_setup(VP10_COMP *cpi) {
 
     for (i = 0; i < MAX_SEGMENTS; ++i) {
       int qindex_delta =
-          vp10_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
+          av1_compute_qdelta_by_rate(&cpi->rc, cm->frame_type, cm->base_qindex,
                                       rate_ratio[i], cm->bit_depth);
 
       // We don't allow qindex 0 in a segment if the base value is not 0.
@@ -76,8 +76,8 @@ void vp10_vaq_frame_setup(VP10_COMP *cpi) {
         continue;
       }
 
-      vp10_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
-      vp10_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
+      av1_set_segdata(seg, i, SEG_LVL_ALT_Q, qindex_delta);
+      av1_enable_segfeature(seg, i, SEG_LVL_ALT_Q);
     }
   }
 }
@@ -105,7 +105,7 @@ static void aq_variance(const uint8_t *a, int a_stride, const uint8_t *b,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void aq_highbd_variance64(const uint8_t *a8, int a_stride,
                                  const uint8_t *b8, int b_stride, int w, int h,
                                  uint64_t *sse, uint64_t *sum) {
@@ -136,9 +136,9 @@ static void aq_highbd_8_variance(const uint8_t *a8, int a_stride,
   *sse = (unsigned int)sse_long;
   *sum = (int)sum_long;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
+static unsigned int block_variance(AV1_COMP *cpi, MACROBLOCK *x,
                                    BLOCK_SIZE bs) {
   MACROBLOCKD *xd = &x->e_mbd;
   unsigned int var, sse;
@@ -151,54 +151,54 @@ static unsigned int block_variance(VP10_COMP *cpi, MACROBLOCK *x,
     const int bw = 8 * num_8x8_blocks_wide_lookup[bs] - right_overflow;
     const int bh = 8 * num_8x8_blocks_high_lookup[bs] - bottom_overflow;
     int avg;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       aq_highbd_8_variance(x->plane[0].src.buf, x->plane[0].src.stride,
-                           CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, bw, bh,
+                           CONVERT_TO_BYTEPTR(av1_highbd_64_zeros), 0, bw, bh,
                            &sse, &avg);
       sse >>= 2 * (xd->bd - 8);
       avg >>= (xd->bd - 8);
     } else {
-      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+      aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_64_zeros, 0,
                   bw, bh, &sse, &avg);
     }
 #else
-    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, vp10_64_zeros, 0,
+    aq_variance(x->plane[0].src.buf, x->plane[0].src.stride, av1_64_zeros, 0,
                 bw, bh, &sse, &avg);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     var = sse - (((int64_t)avg * avg) / (bw * bh));
     return (256 * var) / (bw * bh);
   } else {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       var =
           cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
-                             CONVERT_TO_BYTEPTR(vp10_highbd_64_zeros), 0, &sse);
+                             CONVERT_TO_BYTEPTR(av1_highbd_64_zeros), 0, &sse);
     } else {
       var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
-                               vp10_64_zeros, 0, &sse);
+                               av1_64_zeros, 0, &sse);
     }
 #else
     var = cpi->fn_ptr[bs].vf(x->plane[0].src.buf, x->plane[0].src.stride,
-                             vp10_64_zeros, 0, &sse);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+                             av1_64_zeros, 0, &sse);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return (256 * var) >> num_pels_log2_lookup[bs];
   }
 }
 
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   unsigned int var = block_variance(cpi, x, bs);
   aom_clear_system_state();
   return log(var + 1.0);
 }
 
 #define DEFAULT_E_MIDPOINT 10.0
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs) {
   double energy;
   double energy_midpoint;
   aom_clear_system_state();
   energy_midpoint =
       (cpi->oxcf.pass == 2) ? cpi->twopass.mb_av_energy : DEFAULT_E_MIDPOINT;
-  energy = vp10_log_block_var(cpi, x, bs) - energy_midpoint;
+  energy = av1_log_block_var(cpi, x, bs) - energy_midpoint;
   return clamp((int)round(energy), ENERGY_MIN, ENERGY_MAX);
 }
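
The energy metric defined in this file reduces to a few lines once the SIMD and high-bit-depth plumbing is stripped away. A minimal sketch, with illustrative names and constants, of what av1_block_energy() computes: variance of the source block against an all-zero reference, normalised per pixel (scaled by 256), then log-mapped, recentred on a midpoint and clamped to the segment range.

#include <math.h>

/* Illustrative sketch only; bw/bh are the visible block dimensions and
 * midpoint plays the role of DEFAULT_E_MIDPOINT / twopass.mb_av_energy. */
static int block_energy_sketch(const unsigned char *src, int stride, int bw,
                               int bh, double midpoint, int e_min, int e_max) {
  long long sum = 0, sse = 0;
  int r, c, e;
  double n, var, energy;
  for (r = 0; r < bh; ++r) {
    for (c = 0; c < bw; ++c) {
      const int d = src[r * stride + c]; /* difference from the zero block */
      sum += d;
      sse += (long long)d * d;
    }
  }
  n = (double)bw * bh;
  var = ((double)sse - (double)sum * sum / n) * 256.0 / n; /* per-pixel, x256 */
  energy = log(var + 1.0) - midpoint;
  e = (int)(energy >= 0.0 ? energy + 0.5 : energy - 0.5); /* round to nearest */
  return e < e_min ? e_min : (e > e_max ? e_max : e);
}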
diff --git a/av1/encoder/aq_variance.h b/av1/encoder/aq_variance.h
index fa1103c19c148261e1288a8eaac6a2b40cc536d0..4900aa75d49abd8b5f27010dd3a565985b390251 100644
--- a/av1/encoder/aq_variance.h
+++ b/av1/encoder/aq_variance.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_AQ_VARIANCE_H_
-#define VP10_ENCODER_AQ_VARIANCE_H_
+#ifndef AV1_ENCODER_AQ_VARIANCE_H_
+#define AV1_ENCODER_AQ_VARIANCE_H_
 
 #include "av1/encoder/encoder.h"
 
@@ -18,14 +18,14 @@
 extern "C" {
 #endif
 
-unsigned int vp10_vaq_segment_id(int energy);
-void vp10_vaq_frame_setup(VP10_COMP *cpi);
+unsigned int av1_vaq_segment_id(int energy);
+void av1_vaq_frame_setup(AV1_COMP *cpi);
 
-int vp10_block_energy(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
-double vp10_log_block_var(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+int av1_block_energy(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
+double av1_log_block_var(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bs);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_AQ_VARIANCE_H_
+#endif  // AV1_ENCODER_AQ_VARIANCE_H_
diff --git a/av1/encoder/arm/neon/dct_neon.c b/av1/encoder/arm/neon/dct_neon.c
index 947b41a52a19251046fb39fb115d0dc981c6567c..bfcb0b48b17723822ded92f48d6bf9ee7d550d6f 100644
--- a/av1/encoder/arm/neon/dct_neon.c
+++ b/av1/encoder/arm/neon/dct_neon.c
@@ -18,7 +18,7 @@
 #include "av1/common/blockd.h"
 #include "aom_dsp/txfm_common.h"
 
-void vp10_fdct8x8_quant_neon(
+void av1_fdct8x8_quant_neon(
     const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
     int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
     const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
@@ -28,7 +28,7 @@ void vp10_fdct8x8_quant_neon(
   (void)coeff_ptr;
 
   aom_fdct8x8_neon(input, temp_buffer, stride);
-  vp10_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
+  av1_quantize_fp_neon(temp_buffer, n_coeffs, skip_block, zbin_ptr, round_ptr,
                         quant_ptr, quant_shift_ptr, qcoeff_ptr, dqcoeff_ptr,
                         dequant_ptr, eob_ptr, scan_ptr, iscan_ptr);
 }
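
The NEON entry point renamed above is best read as a composition rather than a new kernel: it runs the plain forward DCT into a stack buffer and hands the result to the existing fixed-point quantizer. A standalone sketch of that pattern (the typedefs and names are illustrative, not the library's own):

#include <stdint.h>

typedef void (*fwd_txfm_fn)(const int16_t *input, int16_t *output, int stride);
typedef void (*quant_fn)(const int16_t *coeff, intptr_t n_coeffs,
                         int16_t *qcoeff);

/* Fused transform-and-quantize built from two existing kernels. */
static void fdct_quant_sketch(const int16_t *input, int stride,
                              intptr_t n_coeffs, int16_t *qcoeff,
                              fwd_txfm_fn fdct, quant_fn quant) {
  int16_t temp[64];              /* one 8x8 block of transform coefficients */
  fdct(input, temp, stride);     /* 1) forward transform into the scratch block */
  quant(temp, n_coeffs, qcoeff); /* 2) quantize the transformed coefficients */
}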
diff --git a/av1/encoder/arm/neon/error_neon.c b/av1/encoder/arm/neon/error_neon.c
index b0761cdc21e875db56a62662cae7f42f4bf4a5b1..0c89f2e60fc9cfc87bf243cc547b32b78116f7fe 100644
--- a/av1/encoder/arm/neon/error_neon.c
+++ b/av1/encoder/arm/neon/error_neon.c
@@ -14,7 +14,7 @@
 
 #include "./av1_rtcd.h"
 
-int64_t vp10_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
+int64_t av1_block_error_fp_neon(const int16_t *coeff, const int16_t *dqcoeff,
                                  int block_size) {
   int64x2_t error = vdupq_n_s64(0);
 
diff --git a/av1/encoder/arm/neon/quantize_neon.c b/av1/encoder/arm/neon/quantize_neon.c
index ded962d1b55c0212e345ee6fb364173e540cab92..6abf63955cbf291f708795e84373b5e0f9e18496 100644
--- a/av1/encoder/arm/neon/quantize_neon.c
+++ b/av1/encoder/arm/neon/quantize_neon.c
@@ -22,7 +22,7 @@
 #include "av1/encoder/quantize.h"
 #include "av1/encoder/rd.h"
 
-void vp10_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
+void av1_quantize_fp_neon(const int16_t *coeff_ptr, intptr_t count,
                            int skip_block, const int16_t *zbin_ptr,
                            const int16_t *round_ptr, const int16_t *quant_ptr,
                            const int16_t *quant_shift_ptr, int16_t *qcoeff_ptr,
diff --git a/av1/encoder/bitstream.c b/av1/encoder/bitstream.c
index 3f1ccc6825a3e4a8b65a399875ab5400ee6d4d21..2c90bec0b3fbf5404880c14acb42e90eee1d0e45 100644
--- a/av1/encoder/bitstream.c
+++ b/av1/encoder/bitstream.c
@@ -42,34 +42,34 @@
 #include "av1/encoder/subexp.h"
 #include "av1/encoder/tokenize.h"
 
-static const struct vp10_token intra_mode_encodings[INTRA_MODES] = {
+static const struct av1_token intra_mode_encodings[INTRA_MODES] = {
   { 0, 1 },  { 6, 3 },   { 28, 5 },  { 30, 5 }, { 58, 6 },
   { 59, 6 }, { 126, 7 }, { 127, 7 }, { 62, 6 }, { 2, 2 }
 };
-static const struct vp10_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
+static const struct av1_token switchable_interp_encodings[SWITCHABLE_FILTERS] =
     { { 0, 1 }, { 2, 2 }, { 3, 2 } };
-static const struct vp10_token partition_encodings[PARTITION_TYPES] = {
+static const struct av1_token partition_encodings[PARTITION_TYPES] = {
   { 0, 1 }, { 2, 2 }, { 6, 3 }, { 7, 3 }
 };
-static const struct vp10_token inter_mode_encodings[INTER_MODES] = {
+static const struct av1_token inter_mode_encodings[INTER_MODES] = {
   { 2, 2 }, { 6, 3 }, { 0, 1 }, { 7, 3 }
 };
 
-static struct vp10_token ext_tx_encodings[TX_TYPES];
+static struct av1_token ext_tx_encodings[TX_TYPES];
 
-void vp10_encode_token_init() {
-  vp10_tokens_from_tree(ext_tx_encodings, vp10_ext_tx_tree);
+void av1_encode_token_init() {
+  av1_tokens_from_tree(ext_tx_encodings, av1_ext_tx_tree);
 }
 
 static void write_intra_mode(aom_writer *w, PREDICTION_MODE mode,
                              const aom_prob *probs) {
-  vp10_write_token(w, vp10_intra_mode_tree, probs, &intra_mode_encodings[mode]);
+  av1_write_token(w, av1_intra_mode_tree, probs, &intra_mode_encodings[mode]);
 }
 
 static void write_inter_mode(aom_writer *w, PREDICTION_MODE mode,
                              const aom_prob *probs) {
   assert(is_inter_mode(mode));
-  vp10_write_token(w, vp10_inter_mode_tree, probs,
+  av1_write_token(w, av1_inter_mode_tree, probs,
                    &inter_mode_encodings[INTER_OFFSET(mode)]);
 }
 
@@ -88,9 +88,9 @@ static void prob_diff_update(const aom_tree_index *tree,
   // Assuming max number of probabilities <= 32
   assert(n <= 32);
 
-  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  av1_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i)
-    vp10_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
+    av1_cond_prob_diff_update(w, &probs[i], branch_ct[i]);
 }
 
 static int prob_diff_update_savings(const aom_tree_index *tree,
@@ -103,14 +103,14 @@ static int prob_diff_update_savings(const aom_tree_index *tree,
 
   // Assuming max number of probabilities <= 32
   assert(n <= 32);
-  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  av1_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i) {
-    savings += vp10_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
+    savings += av1_cond_prob_diff_update_savings(&probs[i], branch_ct[i]);
   }
   return savings;
 }
 
-static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_selected_tx_size(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                                    aom_writer *w) {
   TX_SIZE tx_size = xd->mi[0]->mbmi.tx_size;
   BLOCK_SIZE bsize = xd->mi[0]->mbmi.sb_type;
@@ -125,37 +125,37 @@ static void write_selected_tx_size(const VP10_COMMON *cm, const MACROBLOCKD *xd,
   }
 }
 
-static int write_skip(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static int write_skip(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                       int segment_id, const MODE_INFO *mi, aom_writer *w) {
   if (segfeature_active(&cm->seg, segment_id, SEG_LVL_SKIP)) {
     return 1;
   } else {
     const int skip = mi->mbmi.skip;
-    aom_write(w, skip, vp10_get_skip_prob(cm, xd));
+    aom_write(w, skip, av1_get_skip_prob(cm, xd));
     return skip;
   }
 }
 
-static void update_skip_probs(VP10_COMMON *cm, aom_writer *w,
+static void update_skip_probs(AV1_COMMON *cm, aom_writer *w,
                               FRAME_COUNTS *counts) {
   int k;
 
   for (k = 0; k < SKIP_CONTEXTS; ++k)
-    vp10_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
+    av1_cond_prob_diff_update(w, &cm->fc->skip_probs[k], counts->skip[k]);
 }
 
-static void update_switchable_interp_probs(VP10_COMMON *cm, aom_writer *w,
+static void update_switchable_interp_probs(AV1_COMMON *cm, aom_writer *w,
                                            FRAME_COUNTS *counts) {
   int j;
   for (j = 0; j < SWITCHABLE_FILTER_CONTEXTS; ++j)
-    prob_diff_update(vp10_switchable_interp_tree,
+    prob_diff_update(av1_switchable_interp_tree,
                      cm->fc->switchable_interp_prob[j],
                      counts->switchable_interp[j], SWITCHABLE_FILTERS, w);
 }
 
-static void update_ext_tx_probs(VP10_COMMON *cm, aom_writer *w) {
-  const int savings_thresh = vp10_cost_one(GROUP_DIFF_UPDATE_PROB) -
-                             vp10_cost_zero(GROUP_DIFF_UPDATE_PROB);
+static void update_ext_tx_probs(AV1_COMMON *cm, aom_writer *w) {
+  const int savings_thresh = av1_cost_one(GROUP_DIFF_UPDATE_PROB) -
+                             av1_cost_zero(GROUP_DIFF_UPDATE_PROB);
   int i, j;
 
   int savings = 0;
@@ -163,7 +163,7 @@ static void update_ext_tx_probs(VP10_COMMON *cm, aom_writer *w) {
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
       savings += prob_diff_update_savings(
-          vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+          av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
           cm->counts.intra_ext_tx[i][j], TX_TYPES);
   }
   do_update = savings > savings_thresh;
@@ -171,21 +171,21 @@ static void update_ext_tx_probs(VP10_COMMON *cm, aom_writer *w) {
   if (do_update) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
       for (j = 0; j < TX_TYPES; ++j)
-        prob_diff_update(vp10_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
+        prob_diff_update(av1_ext_tx_tree, cm->fc->intra_ext_tx_prob[i][j],
                          cm->counts.intra_ext_tx[i][j], TX_TYPES, w);
     }
   }
   savings = 0;
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     savings +=
-        prob_diff_update_savings(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+        prob_diff_update_savings(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
                                  cm->counts.inter_ext_tx[i], TX_TYPES);
   }
   do_update = savings > savings_thresh;
   aom_write(w, do_update, GROUP_DIFF_UPDATE_PROB);
   if (do_update) {
     for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-      prob_diff_update(vp10_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
+      prob_diff_update(av1_ext_tx_tree, cm->fc->inter_ext_tx_prob[i],
                        cm->counts.inter_ext_tx[i], TX_TYPES, w);
     }
   }
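
The savings-versus-threshold test in update_ext_tx_probs() (and in the coefficient-probability updates later in this file) follows one pattern: sum the estimated bit savings from re-signalling each probability and update only if that beats the extra cost of flipping the one-bit update flag. A standalone sketch of the decision in plain floating-point bits (the encoder uses fixed-point cost tables such as av1_cost_zero() rather than log2(); the names below are illustrative):

#include <math.h>

/* Cost, in bits, of coding a binary symbol whose probability is p. */
static double bit_cost(double p) { return -log2(p); }

/* p_no_update is the prior probability of signalling "no update"
 * (GROUP_DIFF_UPDATE_PROB plays this role above). */
static int worth_updating(const double *savings_bits, int n,
                          double p_no_update) {
  const double threshold = bit_cost(1.0 - p_no_update) - bit_cost(p_no_update);
  double total = 0.0;
  int i;
  for (i = 0; i < n; ++i) total += savings_bits[i];
  return total > threshold;
}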
@@ -201,22 +201,22 @@ static void pack_mb_tokens(aom_writer *w, TOKENEXTRA **tp,
 
   while (p < stop && p->token != EOSB_TOKEN) {
     const int t = p->token;
-    const struct vp10_token *const a = &vp10_coef_encodings[t];
+    const struct av1_token *const a = &av1_coef_encodings[t];
     int i = 0;
     int v = a->value;
     int n = a->len;
-#if CONFIG_VPX_HIGHBITDEPTH
-    const vp10_extra_bit *b;
+#if CONFIG_AOM_HIGHBITDEPTH
+    const av1_extra_bit *b;
     if (bit_depth == VPX_BITS_12)
-      b = &vp10_extra_bits_high12[t];
+      b = &av1_extra_bits_high12[t];
     else if (bit_depth == VPX_BITS_10)
-      b = &vp10_extra_bits_high10[t];
+      b = &av1_extra_bits_high10[t];
     else
-      b = &vp10_extra_bits[t];
+      b = &av1_extra_bits[t];
 #else
-    const vp10_extra_bit *const b = &vp10_extra_bits[t];
+    const av1_extra_bit *const b = &av1_extra_bits[t];
     (void)bit_depth;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     /* skip one or two nodes */
     if (p->skip_eob_node) {
@@ -235,12 +235,12 @@ static void pack_mb_tokens(aom_writer *w, TOKENEXTRA **tp,
     if (t >= TWO_TOKEN && t < EOB_TOKEN) {
       int len = UNCONSTRAINED_NODES - p->skip_eob_node;
       int bits = v >> (n - len);
-      vp10_write_tree(w, vp10_coef_tree, p->context_tree, bits, len, i);
-      vp10_write_tree(w, vp10_coef_con_tree,
-                      vp10_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
+      av1_write_tree(w, av1_coef_tree, p->context_tree, bits, len, i);
+      av1_write_tree(w, av1_coef_con_tree,
+                      av1_pareto8_full[p->context_tree[PIVOT_NODE] - 1], v,
                       n - len, 0);
     } else {
-      vp10_write_tree(w, vp10_coef_tree, p->context_tree, v, n, i);
+      av1_write_tree(w, av1_coef_tree, p->context_tree, v, n, i);
     }
 
     if (b->base_val) {
@@ -281,11 +281,11 @@ static void write_segment_id(aom_writer *w, const struct segmentation *seg,
                              const struct segmentation_probs *segp,
                              int segment_id) {
   if (seg->enabled && seg->update_map)
-    vp10_write_tree(w, vp10_segment_tree, segp->tree_probs, segment_id, 3, 0);
+    av1_write_tree(w, av1_segment_tree, segp->tree_probs, segment_id, 3, 0);
 }
 
 // This function encodes the reference frame
-static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_ref_frames(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                              aom_writer *w) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   const int is_compound = has_second_ref(mbmi);
@@ -301,28 +301,28 @@ static void write_ref_frames(const VP10_COMMON *cm, const MACROBLOCKD *xd,
     // does the feature use compound prediction or not
     // (if not specified at the frame/segment level)
     if (cm->reference_mode == REFERENCE_MODE_SELECT) {
-      aom_write(w, is_compound, vp10_get_reference_mode_prob(cm, xd));
+      aom_write(w, is_compound, av1_get_reference_mode_prob(cm, xd));
     } else {
       assert((!is_compound) == (cm->reference_mode == SINGLE_REFERENCE));
     }
 
     if (is_compound) {
       aom_write(w, mbmi->ref_frame[0] == GOLDEN_FRAME,
-                vp10_get_pred_prob_comp_ref_p(cm, xd));
+                av1_get_pred_prob_comp_ref_p(cm, xd));
     } else {
       const int bit0 = mbmi->ref_frame[0] != LAST_FRAME;
-      aom_write(w, bit0, vp10_get_pred_prob_single_ref_p1(cm, xd));
+      aom_write(w, bit0, av1_get_pred_prob_single_ref_p1(cm, xd));
       if (bit0) {
         const int bit1 = mbmi->ref_frame[0] != GOLDEN_FRAME;
-        aom_write(w, bit1, vp10_get_pred_prob_single_ref_p2(cm, xd));
+        aom_write(w, bit1, av1_get_pred_prob_single_ref_p2(cm, xd));
       }
     }
   }
 }
 
-static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
+static void pack_inter_mode_mvs(AV1_COMP *cpi, const MODE_INFO *mi,
                                 aom_writer *w) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const nmv_context *nmvc = &cm->fc->nmvc;
   const MACROBLOCK *const x = &cpi->td.mb;
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -345,7 +345,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
   if (seg->update_map) {
     if (seg->temporal_update) {
       const int pred_flag = mbmi->seg_id_predicted;
-      aom_prob pred_prob = vp10_get_pred_prob_seg_id(segp, xd);
+      aom_prob pred_prob = av1_get_pred_prob_seg_id(segp, xd);
       aom_write(w, pred_flag, pred_prob);
       if (!pred_flag) write_segment_id(w, seg, segp, segment_id);
     } else {
@@ -356,7 +356,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
   skip = write_skip(cm, xd, segment_id, mi, w);
 
   if (!segfeature_active(seg, segment_id, SEG_LVL_REF_FRAME))
-    aom_write(w, is_inter, vp10_get_intra_inter_prob(cm, xd));
+    aom_write(w, is_inter, av1_get_intra_inter_prob(cm, xd));
 
   if (bsize >= BLOCK_8X8 && cm->tx_mode == TX_MODE_SELECT &&
       !(is_inter && skip) && !xd->lossless[segment_id]) {
@@ -391,8 +391,8 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
     }
 
     if (cm->interp_filter == SWITCHABLE) {
-      const int ctx = vp10_get_pred_context_switchable_interp(xd);
-      vp10_write_token(w, vp10_switchable_interp_tree,
+      const int ctx = av1_get_pred_context_switchable_interp(xd);
+      av1_write_token(w, av1_switchable_interp_tree,
                        cm->fc->switchable_interp_prob[ctx],
                        &switchable_interp_encodings[mbmi->interp_filter]);
       ++cpi->interp_filter_selected[0][mbmi->interp_filter];
@@ -411,7 +411,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
           write_inter_mode(w, b_mode, inter_probs);
           if (b_mode == NEWMV) {
             for (ref = 0; ref < 1 + is_compound; ++ref)
-              vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
+              av1_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
                              &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                              nmvc, allow_hp);
           }
@@ -420,7 +420,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
     } else {
       if (mode == NEWMV) {
         for (ref = 0; ref < 1 + is_compound; ++ref)
-          vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
+          av1_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                          &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
                          nmvc, allow_hp);
       }
@@ -429,12 +429,12 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
   if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
     if (is_inter) {
-      vp10_write_token(w, vp10_ext_tx_tree,
+      av1_write_token(w, av1_ext_tx_tree,
                        cm->fc->inter_ext_tx_prob[mbmi->tx_size],
                        &ext_tx_encodings[mbmi->tx_type]);
     } else {
-      vp10_write_token(
-          w, vp10_ext_tx_tree,
+      av1_write_token(
+          w, av1_ext_tx_tree,
           cm->fc->intra_ext_tx_prob[mbmi->tx_size]
                                    [intra_mode_to_tx_type_context[mbmi->mode]],
           &ext_tx_encodings[mbmi->tx_type]);
@@ -444,7 +444,7 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
   }
 }
 
-static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
+static void write_mb_modes_kf(const AV1_COMMON *cm, const MACROBLOCKD *xd,
                               MODE_INFO **mi_8x8, aom_writer *w) {
   const struct segmentation *const seg = &cm->seg;
 #if CONFIG_MISC_FIXES
@@ -487,19 +487,19 @@ static void write_mb_modes_kf(const VP10_COMMON *cm, const MACROBLOCKD *xd,
 
   if (mbmi->tx_size < TX_32X32 && cm->base_qindex > 0 && !mbmi->skip &&
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP)) {
-    vp10_write_token(
-        w, vp10_ext_tx_tree,
+    av1_write_token(
+        w, av1_ext_tx_tree,
         cm->fc->intra_ext_tx_prob[mbmi->tx_size]
                                  [intra_mode_to_tx_type_context[mbmi->mode]],
         &ext_tx_encodings[mbmi->tx_type]);
   }
 }
 
-static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
+static void write_modes_b(AV1_COMP *cpi, const TileInfo *const tile,
                           aom_writer *w, TOKENEXTRA **tok,
                           const TOKENEXTRA *const tok_end, int mi_row,
                           int mi_col) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
   MODE_INFO *m;
   int plane;
@@ -530,7 +530,7 @@ static void write_modes_b(VP10_COMP *cpi, const TileInfo *const tile,
   }
 }
 
-static void write_partition(const VP10_COMMON *const cm,
+static void write_partition(const AV1_COMMON *const cm,
                             const MACROBLOCKD *const xd, int hbs, int mi_row,
                             int mi_col, PARTITION_TYPE p, BLOCK_SIZE bsize,
                             aom_writer *w) {
@@ -540,7 +540,7 @@ static void write_partition(const VP10_COMMON *const cm,
   const int has_cols = (mi_col + hbs) < cm->mi_cols;
 
   if (has_rows && has_cols) {
-    vp10_write_token(w, vp10_partition_tree, probs, &partition_encodings[p]);
+    av1_write_token(w, av1_partition_tree, probs, &partition_encodings[p]);
   } else if (!has_rows && has_cols) {
     assert(p == PARTITION_SPLIT || p == PARTITION_HORZ);
     aom_write(w, p == PARTITION_SPLIT, probs[1]);
@@ -552,11 +552,11 @@ static void write_partition(const VP10_COMMON *const cm,
   }
 }
 
-static void write_modes_sb(VP10_COMP *cpi, const TileInfo *const tile,
+static void write_modes_sb(AV1_COMP *cpi, const TileInfo *const tile,
                            aom_writer *w, TOKENEXTRA **tok,
                            const TOKENEXTRA *const tok_end, int mi_row,
                            int mi_col, BLOCK_SIZE bsize) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
   const int bsl = b_width_log2_lookup[bsize];
@@ -617,7 +617,7 @@ static void write_modes_sb(VP10_COMP *cpi, const TileInfo *const tile,
 #endif
 }
 
-static void write_modes(VP10_COMP *cpi, const TileInfo *const tile,
+static void write_modes(AV1_COMP *cpi, const TileInfo *const tile,
                         aom_writer *w, TOKENEXTRA **tok,
                         const TOKENEXTRA *const tok_end) {
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
@@ -625,17 +625,17 @@ static void write_modes(VP10_COMP *cpi, const TileInfo *const tile,
 
   for (mi_row = tile->mi_row_start; mi_row < tile->mi_row_end;
        mi_row += MI_BLOCK_SIZE) {
-    vp10_zero(xd->left_seg_context);
+    av1_zero(xd->left_seg_context);
     for (mi_col = tile->mi_col_start; mi_col < tile->mi_col_end;
          mi_col += MI_BLOCK_SIZE)
       write_modes_sb(cpi, tile, w, tok, tok_end, mi_row, mi_col, BLOCK_64X64);
   }
 }
 
-static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
-                                    vp10_coeff_stats *coef_branch_ct,
-                                    vp10_coeff_probs_model *coef_probs) {
-  vp10_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
+static void build_tree_distribution(AV1_COMP *cpi, TX_SIZE tx_size,
+                                    av1_coeff_stats *coef_branch_ct,
+                                    av1_coeff_probs_model *coef_probs) {
+  av1_coeff_count *coef_counts = cpi->td.rd_counts.coef_counts[tx_size];
   unsigned int(*eob_branch_ct)[REF_TYPES][COEF_BANDS][COEFF_CONTEXTS] =
       cpi->common.counts.eob_branch[tx_size];
   int i, j, k, l, m;
@@ -644,7 +644,7 @@ static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
     for (j = 0; j < REF_TYPES; ++j) {
       for (k = 0; k < COEF_BANDS; ++k) {
         for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
-          vp10_tree_probs_from_distribution(vp10_coef_tree,
+          av1_tree_probs_from_distribution(av1_coef_tree,
                                             coef_branch_ct[i][j][k][l],
                                             coef_counts[i][j][k][l]);
           coef_branch_ct[i][j][k][l][0][1] =
@@ -659,11 +659,11 @@ static void build_tree_distribution(VP10_COMP *cpi, TX_SIZE tx_size,
   }
 }
 
-static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
+static void update_coef_probs_common(aom_writer *const bc, AV1_COMP *cpi,
                                      TX_SIZE tx_size,
-                                     vp10_coeff_stats *frame_branch_ct,
-                                     vp10_coeff_probs_model *new_coef_probs) {
-  vp10_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
+                                     av1_coeff_stats *frame_branch_ct,
+                                     av1_coeff_probs_model *new_coef_probs) {
+  av1_coeff_probs_model *old_coef_probs = cpi->common.fc->coef_probs[tx_size];
   const aom_prob upd = DIFF_UPDATE_PROB;
   const int entropy_nodes_update = UNCONSTRAINED_NODES;
   int i, j, k, l, t;
@@ -684,17 +684,17 @@ static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
                 int s;
                 int u = 0;
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_diff_update_savings_search_model(
+                  s = av1_prob_diff_update_savings_search_model(
                       frame_branch_ct[i][j][k][l][0],
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 else
-                  s = vp10_prob_diff_update_savings_search(
+                  s = av1_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], oldp, &newp, upd);
                 if (s > 0 && newp != oldp) u = 1;
                 if (u)
-                  savings += s - (int)(vp10_cost_zero(upd));
+                  savings += s - (int)(av1_cost_zero(upd));
                 else
-                  savings -= (int)(vp10_cost_zero(upd));
+                  savings -= (int)(av1_cost_zero(upd));
                 update[u]++;
               }
             }
@@ -721,17 +721,17 @@ static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
                 int s;
                 int u = 0;
                 if (t == PIVOT_NODE)
-                  s = vp10_prob_diff_update_savings_search_model(
+                  s = av1_prob_diff_update_savings_search_model(
                       frame_branch_ct[i][j][k][l][0],
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 else
-                  s = vp10_prob_diff_update_savings_search(
+                  s = av1_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
                 if (s > 0 && newp != *oldp) u = 1;
                 aom_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
-                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  av1_write_prob_diff_update(bc, newp, *oldp);
                   *oldp = newp;
                 }
               }
@@ -757,11 +757,11 @@ static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
                 int u = 0;
 
                 if (t == PIVOT_NODE) {
-                  s = vp10_prob_diff_update_savings_search_model(
+                  s = av1_prob_diff_update_savings_search_model(
                       frame_branch_ct[i][j][k][l][0],
                       old_coef_probs[i][j][k][l], &newp, upd, stepsize);
                 } else {
-                  s = vp10_prob_diff_update_savings_search(
+                  s = av1_prob_diff_update_savings_search(
                       frame_branch_ct[i][j][k][l][t], *oldp, &newp, upd);
                 }
 
@@ -781,7 +781,7 @@ static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
                 aom_write(bc, u, upd);
                 if (u) {
                   /* send/use new probability */
-                  vp10_write_prob_diff_update(bc, newp, *oldp);
+                  av1_write_prob_diff_update(bc, newp, *oldp);
                   *oldp = newp;
                 }
               }
@@ -798,13 +798,13 @@ static void update_coef_probs_common(aom_writer *const bc, VP10_COMP *cpi,
   }
 }
 
-static void update_coef_probs(VP10_COMP *cpi, aom_writer *w) {
+static void update_coef_probs(AV1_COMP *cpi, aom_writer *w) {
   const TX_MODE tx_mode = cpi->common.tx_mode;
   const TX_SIZE max_tx_size = tx_mode_to_biggest_tx_size[tx_mode];
   TX_SIZE tx_size;
   for (tx_size = TX_4X4; tx_size <= max_tx_size; ++tx_size) {
-    vp10_coeff_stats frame_branch_ct[PLANE_TYPES];
-    vp10_coeff_probs_model frame_coef_probs[PLANE_TYPES];
+    av1_coeff_stats frame_branch_ct[PLANE_TYPES];
+    av1_coeff_probs_model frame_coef_probs[PLANE_TYPES];
     if (cpi->td.counts->tx.tx_totals[tx_size] <= 20 ||
         (tx_size >= TX_16X16 && cpi->sf.tx_size_search_method == USE_TX_8X8)) {
       aom_write_bit(w, 0);
@@ -855,7 +855,7 @@ static void encode_loopfilter(struct loopfilter *lf,
 }
 
 #if CONFIG_CLPF
-static void encode_clpf(const VP10_COMMON *cm,
+static void encode_clpf(const AV1_COMMON *cm,
                         struct aom_write_bit_buffer *wb) {
   aom_wb_write_literal(wb, cm->clpf, 1);
 }
@@ -876,7 +876,7 @@ static void write_delta_q(struct aom_write_bit_buffer *wb, int delta_q) {
   }
 }
 
-static void encode_quantization(const VP10_COMMON *const cm,
+static void encode_quantization(const AV1_COMMON *const cm,
                                 struct aom_write_bit_buffer *wb) {
   aom_wb_write_literal(wb, cm->base_qindex, QINDEX_BITS);
   write_delta_q(wb, cm->y_dc_delta_q);
@@ -891,7 +891,7 @@ static void encode_quantization(const VP10_COMMON *const cm,
 #endif
 }
 
-static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
+static void encode_segmentation(AV1_COMMON *cm, MACROBLOCKD *xd,
                                 struct aom_write_bit_buffer *wb) {
   int i, j;
 
@@ -911,7 +911,7 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
   }
   if (seg->update_map) {
     // Select the coding strategy (temporal or spatial)
-    vp10_choose_segmap_coding_method(cm, xd);
+    av1_choose_segmap_coding_method(cm, xd);
 #if !CONFIG_MISC_FIXES
     // Write out probabilities used to decode unpredicted  macro-block segments
     for (i = 0; i < SEG_TREE_PROBS; i++) {
@@ -952,9 +952,9 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
         aom_wb_write_bit(wb, active);
         if (active) {
           const int data = get_segdata(seg, i, j);
-          const int data_max = vp10_seg_feature_data_max(j);
+          const int data_max = av1_seg_feature_data_max(j);
 
-          if (vp10_is_segfeature_signed(j)) {
+          if (av1_is_segfeature_signed(j)) {
             encode_unsigned_max(wb, abs(data), data_max);
             aom_wb_write_bit(wb, data < 0);
           } else {
@@ -967,8 +967,8 @@ static void encode_segmentation(VP10_COMMON *cm, MACROBLOCKD *xd,
 }
 
 #if CONFIG_MISC_FIXES
-static void update_seg_probs(VP10_COMP *cpi, aom_writer *w) {
-  VP10_COMMON *cm = &cpi->common;
+static void update_seg_probs(AV1_COMP *cpi, aom_writer *w) {
+  AV1_COMMON *cm = &cpi->common;
 
   if (!cpi->common.seg.enabled) return;
 
@@ -976,13 +976,13 @@ static void update_seg_probs(VP10_COMP *cpi, aom_writer *w) {
     int i;
 
     for (i = 0; i < PREDICTION_PROBS; i++)
-      vp10_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
+      av1_cond_prob_diff_update(w, &cm->fc->seg.pred_probs[i],
                                  cm->counts.seg.pred[i]);
 
-    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+    prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
                      cm->counts.seg.tree_mispred, MAX_SEGMENTS, w);
   } else {
-    prob_diff_update(vp10_segment_tree, cm->fc->seg.tree_probs,
+    prob_diff_update(av1_segment_tree, cm->fc->seg.tree_probs,
                      cm->counts.seg.tree_total, MAX_SEGMENTS, w);
   }
 }
@@ -998,7 +998,7 @@ static void write_txfm_mode(TX_MODE mode, struct aom_writer *wb) {
 }
 #endif
 
-static void update_txfm_probs(VP10_COMMON *cm, aom_writer *w,
+static void update_txfm_probs(AV1_COMMON *cm, aom_writer *w,
                               FRAME_COUNTS *counts) {
   if (cm->tx_mode == TX_MODE_SELECT) {
     int i, j;
@@ -1007,22 +1007,22 @@ static void update_txfm_probs(VP10_COMMON *cm, aom_writer *w,
     unsigned int ct_32x32p[TX_SIZES - 1][2];
 
     for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
-      vp10_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
+      av1_tx_counts_to_branch_counts_8x8(counts->tx.p8x8[i], ct_8x8p);
       for (j = 0; j < TX_SIZES - 3; j++)
-        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
+        av1_cond_prob_diff_update(w, &cm->fc->tx_probs.p8x8[i][j], ct_8x8p[j]);
     }
 
     for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
-      vp10_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
+      av1_tx_counts_to_branch_counts_16x16(counts->tx.p16x16[i], ct_16x16p);
       for (j = 0; j < TX_SIZES - 2; j++)
-        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
+        av1_cond_prob_diff_update(w, &cm->fc->tx_probs.p16x16[i][j],
                                    ct_16x16p[j]);
     }
 
     for (i = 0; i < TX_SIZE_CONTEXTS; i++) {
-      vp10_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
+      av1_tx_counts_to_branch_counts_32x32(counts->tx.p32x32[i], ct_32x32p);
       for (j = 0; j < TX_SIZES - 1; j++)
-        vp10_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
+        av1_cond_prob_diff_update(w, &cm->fc->tx_probs.p32x32[i][j],
                                    ct_32x32p[j]);
     }
   }
@@ -1034,7 +1034,7 @@ static void write_interp_filter(INTERP_FILTER filter,
   if (filter != SWITCHABLE) aom_wb_write_literal(wb, filter, 2);
 }
 
-static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
+static void fix_interp_filter(AV1_COMMON *cm, FRAME_COUNTS *counts) {
   if (cm->interp_filter == SWITCHABLE) {
     // Check to see if only one of the filters is actually used
     int count[SWITCHABLE_FILTERS];
@@ -1057,10 +1057,10 @@ static void fix_interp_filter(VP10_COMMON *cm, FRAME_COUNTS *counts) {
   }
 }
 
-static void write_tile_info(const VP10_COMMON *const cm,
+static void write_tile_info(const AV1_COMMON *const cm,
                             struct aom_write_bit_buffer *wb) {
   int min_log2_tile_cols, max_log2_tile_cols, ones;
-  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
   // columns
   ones = cm->log2_tile_cols - min_log2_tile_cols;
@@ -1073,13 +1073,13 @@ static void write_tile_info(const VP10_COMMON *const cm,
   if (cm->log2_tile_rows != 0) aom_wb_write_bit(wb, cm->log2_tile_rows != 1);
 }
 
-static int get_refresh_mask(VP10_COMP *cpi) {
-  if (vp10_preserve_existing_gf(cpi)) {
+static int get_refresh_mask(AV1_COMP *cpi) {
+  if (av1_preserve_existing_gf(cpi)) {
     // We have decided to preserve the previously existing golden frame as our
     // new ARF frame. However, in the short term we leave it in the GF slot and,
     // if we're updating the GF with the current decoded frame, we save it
     // instead to the ARF slot.
-    // Later, in the function vp10_encoder.c:vp10_update_reference_frames() we
+    // Later, in av1/encoder/encoder.c:av1_update_reference_frames() we
     // will swap gld_fb_idx and alt_fb_idx to achieve our objective. We do it
     // there so that it can be done outside of the recode loop.
     // Note: This is highly specific to the use of ARF as a forward reference,
@@ -1099,9 +1099,9 @@ static int get_refresh_mask(VP10_COMP *cpi) {
   }
 }
 
-static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
+static size_t encode_tiles(AV1_COMP *cpi, uint8_t *data_ptr,
                            unsigned int *max_tile_sz) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   aom_writer residual_bc;
   int tile_row, tile_col;
   TOKENEXTRA *tok_end;
@@ -1149,7 +1149,7 @@ static size_t encode_tiles(VP10_COMP *cpi, uint8_t *data_ptr,
   return total_size;
 }
 
-static void write_render_size(const VP10_COMMON *cm,
+static void write_render_size(const AV1_COMMON *cm,
                               struct aom_write_bit_buffer *wb) {
   const int scaling_active =
       cm->width != cm->render_width || cm->height != cm->render_height;
@@ -1160,7 +1160,7 @@ static void write_render_size(const VP10_COMMON *cm,
   }
 }
 
-static void write_frame_size(const VP10_COMMON *cm,
+static void write_frame_size(const AV1_COMMON *cm,
                              struct aom_write_bit_buffer *wb) {
   aom_wb_write_literal(wb, cm->width - 1, 16);
   aom_wb_write_literal(wb, cm->height - 1, 16);
@@ -1168,9 +1168,9 @@ static void write_frame_size(const VP10_COMMON *cm,
   write_render_size(cm, wb);
 }
 
-static void write_frame_size_with_refs(VP10_COMP *cpi,
+static void write_frame_size_with_refs(AV1_COMP *cpi,
                                        struct aom_write_bit_buffer *wb) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   int found = 0;
 
   MV_REFERENCE_FRAME ref_frame;
@@ -1206,9 +1206,9 @@ static void write_frame_size_with_refs(VP10_COMP *cpi,
 }
 
 static void write_sync_code(struct aom_write_bit_buffer *wb) {
-  aom_wb_write_literal(wb, VP10_SYNC_CODE_0, 8);
-  aom_wb_write_literal(wb, VP10_SYNC_CODE_1, 8);
-  aom_wb_write_literal(wb, VP10_SYNC_CODE_2, 8);
+  aom_wb_write_literal(wb, AV1_SYNC_CODE_0, 8);
+  aom_wb_write_literal(wb, AV1_SYNC_CODE_1, 8);
+  aom_wb_write_literal(wb, AV1_SYNC_CODE_2, 8);
 }
 
 static void write_profile(BITSTREAM_PROFILE profile,
@@ -1223,7 +1223,7 @@ static void write_profile(BITSTREAM_PROFILE profile,
 }
 
 static void write_bitdepth_colorspace_sampling(
-    VP10_COMMON *const cm, struct aom_write_bit_buffer *wb) {
+    AV1_COMMON *const cm, struct aom_write_bit_buffer *wb) {
   if (cm->profile >= PROFILE_2) {
     assert(cm->bit_depth > VPX_BITS_8);
     aom_wb_write_bit(wb, cm->bit_depth == VPX_BITS_10 ? 0 : 1);
@@ -1246,9 +1246,9 @@ static void write_bitdepth_colorspace_sampling(
   }
 }
 
-static void write_uncompressed_header(VP10_COMP *cpi,
+static void write_uncompressed_header(AV1_COMP *cpi,
                                       struct aom_write_bit_buffer *wb) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
   aom_wb_write_literal(wb, VPX_FRAME_MARKER, 2);
@@ -1358,8 +1358,8 @@ static void write_uncompressed_header(VP10_COMP *cpi,
   write_tile_info(cm, wb);
 }
 
-static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
-  VP10_COMMON *const cm = &cpi->common;
+static size_t write_compressed_header(AV1_COMP *cpi, uint8_t *data) {
+  AV1_COMMON *const cm = &cpi->common;
   FRAME_CONTEXT *const fc = cm->fc;
   FRAME_COUNTS *counts = cpi->td.counts;
   aom_writer header_bc;
@@ -1386,32 +1386,32 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
   update_seg_probs(cpi, &header_bc);
 
   for (i = 0; i < INTRA_MODES; ++i)
-    prob_diff_update(vp10_intra_mode_tree, fc->uv_mode_prob[i],
+    prob_diff_update(av1_intra_mode_tree, fc->uv_mode_prob[i],
                      counts->uv_mode[i], INTRA_MODES, &header_bc);
 
   for (i = 0; i < PARTITION_CONTEXTS; ++i)
-    prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+    prob_diff_update(av1_partition_tree, fc->partition_prob[i],
                      counts->partition[i], PARTITION_TYPES, &header_bc);
 #endif
 
   if (frame_is_intra_only(cm)) {
-    vp10_copy(cm->kf_y_prob, vp10_kf_y_mode_prob);
+    av1_copy(cm->kf_y_prob, av1_kf_y_mode_prob);
 #if CONFIG_MISC_FIXES
     for (i = 0; i < INTRA_MODES; ++i)
       for (j = 0; j < INTRA_MODES; ++j)
-        prob_diff_update(vp10_intra_mode_tree, cm->kf_y_prob[i][j],
+        prob_diff_update(av1_intra_mode_tree, cm->kf_y_prob[i][j],
                          counts->kf_y_mode[i][j], INTRA_MODES, &header_bc);
 #endif
   } else {
     for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
-      prob_diff_update(vp10_inter_mode_tree, cm->fc->inter_mode_probs[i],
+      prob_diff_update(av1_inter_mode_tree, cm->fc->inter_mode_probs[i],
                        counts->inter_mode[i], INTER_MODES, &header_bc);
 
     if (cm->interp_filter == SWITCHABLE)
       update_switchable_interp_probs(cm, &header_bc, counts);
 
     for (i = 0; i < INTRA_INTER_CONTEXTS; i++)
-      vp10_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
+      av1_cond_prob_diff_update(&header_bc, &fc->intra_inter_prob[i],
                                  counts->intra_inter[i]);
 
     if (cpi->allow_comp_inter_inter) {
@@ -1424,42 +1424,42 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
         aom_write_bit(&header_bc, use_hybrid_pred);
         if (use_hybrid_pred)
           for (i = 0; i < COMP_INTER_CONTEXTS; i++)
-            vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
+            av1_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                        counts->comp_inter[i]);
       }
 #else
       if (use_hybrid_pred)
         for (i = 0; i < COMP_INTER_CONTEXTS; i++)
-          vp10_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
+          av1_cond_prob_diff_update(&header_bc, &fc->comp_inter_prob[i],
                                      counts->comp_inter[i]);
 #endif
     }
 
     if (cm->reference_mode != COMPOUND_REFERENCE) {
       for (i = 0; i < REF_CONTEXTS; i++) {
-        vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
+        av1_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][0],
                                    counts->single_ref[i][0]);
-        vp10_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
+        av1_cond_prob_diff_update(&header_bc, &fc->single_ref_prob[i][1],
                                    counts->single_ref[i][1]);
       }
     }
 
     if (cm->reference_mode != SINGLE_REFERENCE)
       for (i = 0; i < REF_CONTEXTS; i++)
-        vp10_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
+        av1_cond_prob_diff_update(&header_bc, &fc->comp_ref_prob[i],
                                    counts->comp_ref[i]);
 
     for (i = 0; i < BLOCK_SIZE_GROUPS; ++i)
-      prob_diff_update(vp10_intra_mode_tree, cm->fc->y_mode_prob[i],
+      prob_diff_update(av1_intra_mode_tree, cm->fc->y_mode_prob[i],
                        counts->y_mode[i], INTRA_MODES, &header_bc);
 
 #if !CONFIG_MISC_FIXES
     for (i = 0; i < PARTITION_CONTEXTS; ++i)
-      prob_diff_update(vp10_partition_tree, fc->partition_prob[i],
+      prob_diff_update(av1_partition_tree, fc->partition_prob[i],
                        counts->partition[i], PARTITION_TYPES, &header_bc);
 #endif
 
-    vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
+    av1_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
                          &counts->mv);
     update_ext_tx_probs(cm, &header_bc);
   }
@@ -1505,14 +1505,14 @@ static int remux_tiles(uint8_t *dest, const int sz, const int n_tiles,
 }
 #endif
 
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size) {
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dest, size_t *size) {
   uint8_t *data = dest;
   size_t first_part_size, uncompressed_hdr_size, data_sz;
   struct aom_write_bit_buffer wb = { data, 0 };
   struct aom_write_bit_buffer saved_wb;
   unsigned int max_tile;
 #if CONFIG_MISC_FIXES
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int n_log2_tiles = cm->log2_tile_rows + cm->log2_tile_cols;
   const int have_tiles = n_log2_tiles > 0;
 #else
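
Note: the uncompressed header written above (write_sync_code(), write_frame_size(), write_profile()) packs fixed-width fields with aom_wb_write_bit()/aom_wb_write_literal(), most-significant bit first, into a flat byte buffer. The sketch below only illustrates that packing pattern; the struct, helper names and sync byte values are invented for the example and are not the library API or the real AV1_SYNC_CODE_* values.

#include <stdint.h>
#include <stddef.h>

/* Stand-in bit writer: bits go into the buffer MSB-first, the same shape
 * as the write-side bit buffer used by write_uncompressed_header() above.
 * Illustration only, not the library implementation. */
struct wb_sketch {
  uint8_t *buf;
  size_t bit_offset;
};

static void wb_sketch_write_bit(struct wb_sketch *w, int bit) {
  const size_t off = w->bit_offset;
  const int shift = 7 - (int)(off % 8);   /* high bit of each byte first */
  if (shift == 7) w->buf[off / 8] = 0;    /* starting a fresh byte */
  w->buf[off / 8] |= (uint8_t)((bit & 1) << shift);
  w->bit_offset = off + 1;
}

static void wb_sketch_write_literal(struct wb_sketch *w, int data, int bits) {
  for (int bit = bits - 1; bit >= 0; bit--)  /* most-significant bit first */
    wb_sketch_write_bit(w, (data >> bit) & 1);
}

/* Same shape as write_sync_code() + write_frame_size(): three 8-bit sync
 * bytes (placeholder values) followed by width - 1 and height - 1 in 16
 * bits each. */
static void write_header_sketch(struct wb_sketch *w, int width, int height) {
  wb_sketch_write_literal(w, 0x4a, 8);  /* placeholder, not AV1_SYNC_CODE_0 */
  wb_sketch_write_literal(w, 0x2e, 8);  /* placeholder, not AV1_SYNC_CODE_1 */
  wb_sketch_write_literal(w, 0x51, 8);  /* placeholder, not AV1_SYNC_CODE_2 */
  wb_sketch_write_literal(w, width - 1, 16);
  wb_sketch_write_literal(w, height - 1, 16);
}

Because every field has a fixed width known to the decoder, the header needs no length prefixes; the reader simply consumes the same bit widths in the same order.
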
diff --git a/av1/encoder/bitstream.h b/av1/encoder/bitstream.h
index e8e4581d78130ba73efd29cef0cd1b4c30a8a655..a9bb97af26e9935e88be69aa4f74838f51171fd0 100644
--- a/av1/encoder/bitstream.h
+++ b/av1/encoder/bitstream.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_BITSTREAM_H_
-#define VP10_ENCODER_BITSTREAM_H_
+#ifndef AV1_ENCODER_BITSTREAM_H_
+#define AV1_ENCODER_BITSTREAM_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -18,10 +18,10 @@ extern "C" {
 
 #include "av1/encoder/encoder.h"
 
-void vp10_encode_token_init();
-void vp10_pack_bitstream(VP10_COMP *const cpi, uint8_t *dest, size_t *size);
+void av1_encode_token_init();
+void av1_pack_bitstream(AV1_COMP *const cpi, uint8_t *dest, size_t *size);
 
-static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
+static INLINE int av1_preserve_existing_gf(AV1_COMP *cpi) {
   return !cpi->multi_arf_allowed && cpi->refresh_golden_frame &&
          cpi->rc.is_src_frame_alt_ref;
 }
@@ -30,4 +30,4 @@ static INLINE int vp10_preserve_existing_gf(VP10_COMP *cpi) {
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_BITSTREAM_H_
+#endif  // AV1_ENCODER_BITSTREAM_H_
diff --git a/av1/encoder/block.h b/av1/encoder/block.h
index 1346ba6a218e2e35edbc8a3e05c6075ad0f61b41..45d853c6807e84856f3c28d99a8661f75ac6162b 100644
--- a/av1/encoder/block.h
+++ b/av1/encoder/block.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_BLOCK_H_
-#define VP10_ENCODER_BLOCK_H_
+#ifndef AV1_ENCODER_BLOCK_H_
+#define AV1_ENCODER_BLOCK_H_
 
 #include "av1/common/entropymv.h"
 #include "av1/common/entropy.h"
@@ -45,7 +45,7 @@ struct macroblock_plane {
 
 /* The [2] dimension is for whether we skip the EOB node (i.e. if previous
  * coefficient in this block was zero) or not. */
-typedef unsigned int vp10_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
+typedef unsigned int av1_coeff_cost[PLANE_TYPES][REF_TYPES][COEF_BANDS][2]
                                     [COEFF_CONTEXTS][ENTROPY_TOKENS];
 
 typedef struct {
@@ -117,7 +117,7 @@ struct macroblock {
   int encode_breakout;
 
   // note that token_costs is the cost when eob node is skipped
-  vp10_coeff_cost token_costs[TX_SIZES];
+  av1_coeff_cost token_costs[TX_SIZES];
 
   int optimize;
 
@@ -147,4 +147,4 @@ struct macroblock {
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_BLOCK_H_
+#endif  // AV1_ENCODER_BLOCK_H_
diff --git a/av1/encoder/blockiness.c b/av1/encoder/blockiness.c
index 36d44109173d00ec13346ae6c21d3e1bcb71327b..49c5dad32929ebf161e32ce03b9a150b63e9536f 100644
--- a/av1/encoder/blockiness.c
+++ b/av1/encoder/blockiness.c
@@ -120,7 +120,7 @@ static int blockiness_horizontal(const uint8_t *s, int sp, const uint8_t *r,
 
 // This function returns the blockiness for the entire frame currently by
 // looking at all borders in steps of 4.
-double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
+double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
                            const unsigned char *img2, int img2_pitch, int width,
                            int height) {
   double blockiness = 0;
diff --git a/av1/encoder/context_tree.c b/av1/encoder/context_tree.c
index d89022ecff284a84292150cced7fb6ced2a14f8b..1a5a6635e4eb5a7366f2876a036e6075500c72db 100644
--- a/av1/encoder/context_tree.c
+++ b/av1/encoder/context_tree.c
@@ -16,7 +16,7 @@ static const BLOCK_SIZE square[] = {
   BLOCK_8X8, BLOCK_16X16, BLOCK_32X32, BLOCK_64X64,
 };
 
-static void alloc_mode_context(VP10_COMMON *cm, int num_4x4_blk,
+static void alloc_mode_context(AV1_COMMON *cm, int num_4x4_blk,
                                PICK_MODE_CONTEXT *ctx) {
   const int num_blk = (num_4x4_blk < 4 ? 4 : num_4x4_blk);
   const int num_pix = num_blk << 4;
@@ -65,7 +65,7 @@ static void free_mode_context(PICK_MODE_CONTEXT *ctx) {
   }
 }
 
-static void alloc_tree_contexts(VP10_COMMON *cm, PC_TREE *tree,
+static void alloc_tree_contexts(AV1_COMMON *cm, PC_TREE *tree,
                                 int num_4x4_blk) {
   alloc_mode_context(cm, num_4x4_blk, &tree->none);
   alloc_mode_context(cm, num_4x4_blk / 2, &tree->horizontal[0]);
@@ -92,7 +92,7 @@ static void free_tree_contexts(PC_TREE *tree) {
 // partition level. There are contexts for none, horizontal, vertical, and
 // split.  Along with a block_size value and a selected block_size which
 // represents the state of our search.
-void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
+void av1_setup_pc_tree(AV1_COMMON *cm, ThreadData *td) {
   int i, j;
   const int leaf_nodes = 64;
   const int tree_nodes = 64 + 16 + 4 + 1;
@@ -141,7 +141,7 @@ void vp10_setup_pc_tree(VP10_COMMON *cm, ThreadData *td) {
   td->pc_root[0].none.best_mode_index = 2;
 }
 
-void vp10_free_pc_tree(ThreadData *td) {
+void av1_free_pc_tree(ThreadData *td) {
   const int tree_nodes = 64 + 16 + 4 + 1;
   int i;
 
diff --git a/av1/encoder/context_tree.h b/av1/encoder/context_tree.h
index c22b8660d1eeee44db516a8a5f21c31692511a82..391ff907bec485e3f5e093bceca4c64ab9393126 100644
--- a/av1/encoder/context_tree.h
+++ b/av1/encoder/context_tree.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_CONTEXT_TREE_H_
-#define VP10_ENCODER_CONTEXT_TREE_H_
+#ifndef AV1_ENCODER_CONTEXT_TREE_H_
+#define AV1_ENCODER_CONTEXT_TREE_H_
 
 #include "av1/common/blockd.h"
 #include "av1/encoder/block.h"
@@ -19,8 +19,8 @@
 extern "C" {
 #endif
 
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
 struct ThreadData;
 
 // Structure to hold snapshot of coding context during the mode picking process
@@ -78,11 +78,11 @@ typedef struct PC_TREE {
   };
 } PC_TREE;
 
-void vp10_setup_pc_tree(struct VP10Common *cm, struct ThreadData *td);
-void vp10_free_pc_tree(struct ThreadData *td);
+void av1_setup_pc_tree(struct AV1Common *cm, struct ThreadData *td);
+void av1_free_pc_tree(struct ThreadData *td);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif /* VP10_ENCODER_CONTEXT_TREE_H_ */
+#endif /* AV1_ENCODER_CONTEXT_TREE_H_ */
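
Note: av1_setup_pc_tree() in the companion context_tree.c change sizes the tree as 64 + 16 + 4 + 1 PC_TREE nodes, i.e. one node for every 8x8, 16x16, 32x32 and 64x64 block position inside a 64x64 superblock. A minimal sketch re-deriving that count (illustration only, not encoder code):

#include <stdio.h>

/* Re-derive the 64 + 16 + 4 + 1 node count used by av1_setup_pc_tree():
 * a quadtree over a 64x64 superblock with one PC_TREE node per block
 * position at the 8x8, 16x16, 32x32 and 64x64 levels. */
static int pc_tree_node_count(int sb_size, int min_node_size) {
  int total = 0;
  for (int size = min_node_size; size <= sb_size; size *= 2) {
    const int per_side = sb_size / size;
    total += per_side * per_side;   /* 64, 16, 4, 1 */
  }
  return total;
}

int main(void) {
  printf("%d\n", pc_tree_node_count(64, 8));   /* prints 85 */
  return 0;
}
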
diff --git a/av1/encoder/cost.c b/av1/encoder/cost.c
index acf4a4722f7d0b5a426dc782c3ba8b04fa626a47..a84f7d399280fb68f707cdaa5f22239efb70bcc5 100644
--- a/av1/encoder/cost.c
+++ b/av1/encoder/cost.c
@@ -15,7 +15,7 @@
 /* round(-log2(i/256.) * (1 << VP9_PROB_COST_SHIFT))
    Begins and ends with a bogus entry to satisfy use of prob=0 in the firstpass.
    https://code.google.com/p/webm/issues/detail?id=1089 */
-const uint16_t vp10_prob_cost[257] = {
+const uint16_t av1_prob_cost[257] = {
   4096, 4096, 3584, 3284, 3072, 2907, 2772, 2659, 2560, 2473, 2395, 2325, 2260,
   2201, 2147, 2096, 2048, 2003, 1961, 1921, 1883, 1847, 1813, 1780, 1748, 1718,
   1689, 1661, 1635, 1609, 1584, 1559, 1536, 1513, 1491, 1470, 1449, 1429, 1409,
@@ -44,7 +44,7 @@ static void cost(int *costs, aom_tree tree, const aom_prob *probs, int i,
   int b;
 
   for (b = 0; b <= 1; ++b) {
-    const int cc = c + vp10_cost_bit(prob, b);
+    const int cc = c + av1_cost_bit(prob, b);
     const aom_tree_index ii = tree[i + b];
 
     if (ii <= 0)
@@ -54,13 +54,13 @@ static void cost(int *costs, aom_tree tree, const aom_prob *probs, int i,
   }
 }
 
-void vp10_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree) {
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree) {
   cost(costs, tree, probs, 0, 0);
 }
 
-void vp10_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree) {
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree) {
   assert(tree[0] <= 0 && tree[1] > 0);
 
-  costs[-tree[0]] = vp10_cost_bit(probs[0], 0);
+  costs[-tree[0]] = av1_cost_bit(probs[0], 0);
   cost(costs, tree, probs, 2, 0);
 }
diff --git a/av1/encoder/cost.h b/av1/encoder/cost.h
index be12464a0b4c5e84c4e1cdeec1daca3accc23d2e..374f7d9ab58df664d85664e7e3b0e588b4095aac 100644
--- a/av1/encoder/cost.h
+++ b/av1/encoder/cost.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_COST_H_
-#define VP10_ENCODER_COST_H_
+#ifndef AV1_ENCODER_COST_H_
+#define AV1_ENCODER_COST_H_
 
 #include "aom_dsp/prob.h"
 #include "aom/aom_integer.h"
@@ -19,20 +19,20 @@
 extern "C" {
 #endif
 
-extern const uint16_t vp10_prob_cost[257];
+extern const uint16_t av1_prob_cost[257];
 
-// The factor to scale from cost in bits to cost in vp10_prob_cost units.
+// The factor to scale from cost in bits to cost in av1_prob_cost units.
 #define VP9_PROB_COST_SHIFT 9
 
-#define vp10_cost_zero(prob) (vp10_prob_cost[prob])
+#define av1_cost_zero(prob) (av1_prob_cost[prob])
 
-#define vp10_cost_one(prob) vp10_cost_zero(256 - (prob))
+#define av1_cost_one(prob) av1_cost_zero(256 - (prob))
 
-#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) : (prob))
+#define av1_cost_bit(prob, bit) av1_cost_zero((bit) ? 256 - (prob) : (prob))
 
 static INLINE unsigned int cost_branch256(const unsigned int ct[2],
                                           aom_prob p) {
-  return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
+  return ct[0] * av1_cost_zero(p) + ct[1] * av1_cost_one(p);
 }
 
 static INLINE int treed_cost(aom_tree tree, const aom_prob *probs, int bits,
@@ -42,18 +42,18 @@ static INLINE int treed_cost(aom_tree tree, const aom_prob *probs, int bits,
 
   do {
     const int bit = (bits >> --len) & 1;
-    cost += vp10_cost_bit(probs[i >> 1], bit);
+    cost += av1_cost_bit(probs[i >> 1], bit);
     i = tree[i + bit];
   } while (len);
 
   return cost;
 }
 
-void vp10_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree);
-void vp10_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree);
+void av1_cost_tokens(int *costs, const aom_prob *probs, aom_tree tree);
+void av1_cost_tokens_skip(int *costs, const aom_prob *probs, aom_tree tree);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_COST_H_
+#endif  // AV1_ENCODER_COST_H_
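
Note: the table in cost.c stores round(-log2(p/256) * (1 << VP9_PROB_COST_SHIFT)) for each probability p, so costs are measured in 1/512-bit units, and the av1_cost_zero/av1_cost_one/av1_cost_bit macros plus cost_branch256() simply index or combine it. A small self-contained re-derivation follows; it is an illustration only, and the probability value used in the branch example is hypothetical.

#include <math.h>
#include <stdio.h>

/* Same formula as the comment above av1_prob_cost[]: the cost of coding a
 * "zero" whose probability is p/256, in 1/512-bit units (shift of 9). */
static int prob_cost(int p) {
  return (int)lround(-log2(p / 256.0) * (1 << 9));
}

int main(void) {
  /* p = 128 is a fair coin, exactly one bit -> 512 units.
   * p = 16 costs four bits -> 2048, matching the table entry above. */
  printf("%d %d\n", prob_cost(128), prob_cost(16));   /* 512 2048 */

  /* cost_branch256()-style combination: expected cost of ct[0] zeros and
   * ct[1] ones when the zero branch has (hypothetical) probability p/256. */
  const unsigned ct[2] = { 30, 10 };
  const int p = 192;
  printf("%u\n", ct[0] * (unsigned)prob_cost(p) +
                 ct[1] * (unsigned)prob_cost(256 - p));
  return 0;
}
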
diff --git a/av1/encoder/dct.c b/av1/encoder/dct.c
index 30456f805e4d1b0c0e23a0916098206d314429ec..803c9627d91c6a3a8160f61b26d5451343546dcf 100644
--- a/av1/encoder/dct.c
+++ b/av1/encoder/dct.c
@@ -293,7 +293,7 @@ static void fdct16(const tran_low_t in[16], tran_low_t out[16]) {
   out[15] = (tran_low_t)fdct_round_shift(temp2);
 }
 
-/* TODO(angiebird): Unify this with vp10_fwd_txfm.c: vp10_fdct32
+/* TODO(angiebird): Unify this with av1_fwd_txfm.c: av1_fdct32
 static void fdct32(const tran_low_t *input, tran_low_t *output) {
   tran_high_t temp;
   tran_low_t step[32];
@@ -988,7 +988,7 @@ static const transform_2d FHT_16[] = {
   { fadst16, fadst16 }  // ADST_ADST = 3
 };
 
-void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
                    int tx_type) {
   if (tx_type == DCT_DCT) {
     aom_fdct4x4_c(input, output, stride);
@@ -1015,7 +1015,7 @@ void vp10_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
   }
 }
 
-void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
+void av1_fdct8x8_quant_c(const int16_t *input, int stride,
                           tran_low_t *coeff_ptr, intptr_t n_coeffs,
                           int skip_block, const int16_t *zbin_ptr,
                           const int16_t *round_ptr, const int16_t *quant_ptr,
@@ -1141,7 +1141,7 @@ void vp10_fdct8x8_quant_c(const int16_t *input, int stride,
   *eob_ptr = eob + 1;
 }
 
-void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
                    int tx_type) {
   if (tx_type == DCT_DCT) {
     aom_fdct8x8_c(input, output, stride);
@@ -1170,7 +1170,7 @@ void vp10_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
 
 /* 4-point reversible, orthonormal Walsh-Hadamard in 3.5 adds, 0.5 shifts per
    pixel. */
-void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
+void av1_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   int i;
   tran_high_t a1, b1, c1, d1, e1;
   const int16_t *ip_pass0 = input;
@@ -1224,7 +1224,7 @@ void vp10_fwht4x4_c(const int16_t *input, tran_low_t *output, int stride) {
   }
 }
 
-void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
                      int tx_type) {
   if (tx_type == DCT_DCT) {
     aom_fdct16x16_c(input, output, stride);
@@ -1251,24 +1251,24 @@ void vp10_fht16x16_c(const int16_t *input, tran_low_t *output, int stride,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fht4x4_c(const int16_t *input, tran_low_t *output, int stride,
                           int tx_type) {
-  vp10_fht4x4_c(input, output, stride, tx_type);
+  av1_fht4x4_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
+void av1_highbd_fht8x8_c(const int16_t *input, tran_low_t *output, int stride,
                           int tx_type) {
-  vp10_fht8x8_c(input, output, stride, tx_type);
+  av1_fht8x8_c(input, output, stride, tx_type);
 }
 
-void vp10_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fwht4x4_c(const int16_t *input, tran_low_t *output,
                            int stride) {
-  vp10_fwht4x4_c(input, output, stride);
+  av1_fwht4x4_c(input, output, stride);
 }
 
-void vp10_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
+void av1_highbd_fht16x16_c(const int16_t *input, tran_low_t *output,
                             int stride, int tx_type) {
-  vp10_fht16x16_c(input, output, stride, tx_type);
+  av1_fht16x16_c(input, output, stride, tx_type);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
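
Note: the renamed hybrid transforms (av1_fht4x4_c, av1_fht8x8_c, av1_fht16x16_c) dispatch on tx_type: DCT_DCT falls through to the plain aom_fdct* path, while the ADST variants run a separable 2-D transform that applies the column transform selected from tables like FHT_16, then the row transform. The sketch below shows only that separable column-then-row structure; the local typedefs are stand-ins, and the input scaling and rounding done by the real functions are omitted.

#include <stdint.h>

typedef int32_t tran_low_t_sketch;              /* stand-in coefficient type */
typedef void (*fwd_1d_sketch)(const tran_low_t_sketch *in,
                              tran_low_t_sketch *out);

/* One (cols, rows) pair per tx_type, mirroring the FHT_16[] table above,
 * where e.g. ADST_DCT pairs an ADST column transform with a DCT row
 * transform. */
typedef struct { fwd_1d_sketch cols, rows; } transform_2d_sketch;

/* Separable 2-D forward transform: column pass into a scratch block, then
 * row pass into the output.  Scaling/rounding used by av1_fht*_c is
 * deliberately left out; this only illustrates the structure. */
static void fht_nxn_sketch(const int16_t *input, tran_low_t_sketch *output,
                           int stride, int n, transform_2d_sketch ht) {
  tran_low_t_sketch tmp[16 * 16], in[16], out[16];   /* n <= 16 here */
  int i, j;
  for (i = 0; i < n; ++i) {            /* columns */
    for (j = 0; j < n; ++j) in[j] = input[j * stride + i];
    ht.cols(in, out);
    for (j = 0; j < n; ++j) tmp[j * n + i] = out[j];
  }
  for (i = 0; i < n; ++i) {            /* rows */
    for (j = 0; j < n; ++j) in[j] = tmp[i * n + j];
    ht.rows(in, out);
    for (j = 0; j < n; ++j) output[i * n + j] = out[j];
  }
}

Keeping the 1-D kernels in a per-tx_type table is what lets one generic 2-D driver cover all four DCT/ADST combinations.
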
diff --git a/av1/encoder/encodeframe.c b/av1/encoder/encodeframe.c
index 3495262aff020999a43989c01f955ccb84ce5079..f5808ad3b4e47098211db11c22aa9bd72ce0c952 100644
--- a/av1/encoder/encodeframe.c
+++ b/av1/encoder/encodeframe.c
@@ -47,7 +47,7 @@
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/tokenize.h"
 
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                               int output_enabled, int mi_row, int mi_col,
                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx);
 
@@ -63,7 +63,7 @@ static const uint8_t VP9_VAR_OFFS[64] = {
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128
 };
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const uint16_t VP9_HIGH_VAR_OFFS_8[64] = {
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
   128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
@@ -95,9 +95,9 @@ static const uint16_t VP9_HIGH_VAR_OFFS_12[64] = {
   128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16, 128 * 16,
   128 * 16
 };
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
                                             const struct buf_2d *ref,
                                             BLOCK_SIZE bs) {
   unsigned int sse;
@@ -106,8 +106,8 @@ unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
                                                  const struct buf_2d *ref,
                                                  BLOCK_SIZE bs, int bd) {
   unsigned int var, sse;
@@ -131,9 +131,9 @@ unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
   }
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
+static unsigned int get_sby_perpixel_diff_variance(AV1_COMP *cpi,
                                                    const struct buf_2d *ref,
                                                    int mi_row, int mi_col,
                                                    BLOCK_SIZE bs) {
@@ -148,7 +148,7 @@ static unsigned int get_sby_perpixel_diff_variance(VP10_COMP *cpi,
   return ROUND_POWER_OF_TWO(var, num_pels_log2_lookup[bs]);
 }
 
-static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
+static BLOCK_SIZE get_rd_var_based_fixed_partition(AV1_COMP *cpi,
                                                    MACROBLOCK *x, int mi_row,
                                                    int mi_col) {
   unsigned int var = get_sby_perpixel_diff_variance(
@@ -165,21 +165,21 @@ static BLOCK_SIZE get_rd_var_based_fixed_partition(VP10_COMP *cpi,
 
 // Lighter version of set_offsets that only sets the mode info
 // pointers.
-static INLINE void set_mode_info_offsets(VP10_COMP *const cpi,
+static INLINE void set_mode_info_offsets(AV1_COMP *const cpi,
                                          MACROBLOCK *const x,
                                          MACROBLOCKD *const xd, int mi_row,
                                          int mi_col) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int idx_str = xd->mi_stride * mi_row + mi_col;
   xd->mi = cm->mi_grid_visible + idx_str;
   xd->mi[0] = cm->mi + idx_str;
   x->mbmi_ext = cpi->mbmi_ext_base + (mi_row * cm->mi_cols + mi_col);
 }
 
-static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_offsets(AV1_COMP *cpi, const TileInfo *const tile,
                         MACROBLOCK *const x, int mi_row, int mi_col,
                         BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
   const int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -193,7 +193,7 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
   mbmi = &xd->mi[0]->mbmi;
 
   // Set up destination pointers.
-  vp10_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
+  av1_setup_dst_planes(xd->plane, get_frame_new_buffer(cm), mi_row, mi_col);
 
   // Set up limit values for MV components.
   // Mv beyond the range do not produce new/different prediction block.
@@ -208,7 +208,7 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
                  cm->mi_cols);
 
   // Set up source buffers.
-  vp10_setup_src_planes(x, cpi->Source, mi_row, mi_col);
+  av1_setup_src_planes(x, cpi->Source, mi_row, mi_col);
 
   // R/D setup.
   x->rddiv = cpi->rd.RDDIV;
@@ -221,7 +221,7 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
           seg->update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
       mbmi->segment_id = get_segment_id(cm, map, bsize, mi_row, mi_col);
     }
-    vp10_init_plane_quantizers(cpi, x);
+    av1_init_plane_quantizers(cpi, x);
 
     x->encode_breakout = cpi->segment_encode_breakout[mbmi->segment_id];
   } else {
@@ -229,11 +229,11 @@ static void set_offsets(VP10_COMP *cpi, const TileInfo *const tile,
     x->encode_breakout = cpi->encode_breakout;
   }
 
-  // required by vp10_append_sub8x8_mvs_for_idx() and vp10_find_best_ref_mvs()
+  // required by av1_append_sub8x8_mvs_for_idx() and av1_find_best_ref_mvs()
   xd->tile = *tile;
 }
 
-static void set_block_size(VP10_COMP *const cpi, MACROBLOCK *const x,
+static void set_block_size(AV1_COMP *const cpi, MACROBLOCK *const x,
                            MACROBLOCKD *const xd, int mi_row, int mi_col,
                            BLOCK_SIZE bsize) {
   if (cpi->common.mi_cols > mi_col && cpi->common.mi_rows > mi_row) {
@@ -368,12 +368,12 @@ static void fill_variance_tree(void *data, BLOCK_SIZE bsize) {
                   &node.part_variances->none);
 }
 
-static int set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
+static int set_vt_partitioning(AV1_COMP *cpi, MACROBLOCK *const x,
                                MACROBLOCKD *const xd, void *data,
                                BLOCK_SIZE bsize, int mi_row, int mi_col,
                                int64_t threshold, BLOCK_SIZE bsize_min,
                                int force_split) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   variance_node vt;
   const int block_width = num_8x8_blocks_wide_lookup[bsize];
   const int block_height = num_8x8_blocks_high_lookup[bsize];
@@ -452,8 +452,8 @@ static int set_vt_partitioning(VP10_COMP *cpi, MACROBLOCK *const x,
 // 0 - threshold_64x64, 1 - threshold_32x32, 2 - threshold_16x16,
 // 3 - vbp_threshold_8x8. vbp_threshold_8x8 (to split to 4x4 partition) is
 // currently only used on key frame.
-static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_vbp_thresholds(AV1_COMP *cpi, int64_t thresholds[], int q) {
+  AV1_COMMON *const cm = &cpi->common;
   const int is_key_frame = (cm->frame_type == KEY_FRAME);
   const int threshold_multiplier = is_key_frame ? 20 : 1;
   const int64_t threshold_base =
@@ -478,8 +478,8 @@ static void set_vbp_thresholds(VP10_COMP *cpi, int64_t thresholds[], int q) {
   }
 }
 
-void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_set_variance_partition_thresholds(AV1_COMP *cpi, int q) {
+  AV1_COMMON *const cm = &cpi->common;
   SPEED_FEATURES *const sf = &cpi->sf;
   const int is_key_frame = (cm->frame_type == KEY_FRAME);
   if (sf->partition_search_type != VAR_BASED_PARTITION &&
@@ -507,7 +507,7 @@ void vp10_set_variance_partition_thresholds(VP10_COMP *cpi, int q) {
 // Compute the minmax over the 8x8 subblocks.
 static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
                               int dp, int x16_idx, int y16_idx,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                               int highbd_flag,
 #endif
                               int pixels_wide, int pixels_high) {
@@ -521,7 +521,7 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
     int min = 0;
     int max = 0;
     if (x8_idx < pixels_wide && y8_idx < pixels_high) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
         aom_highbd_minmax_8x8(s + y8_idx * sp + x8_idx, sp,
                               d + y8_idx * dp + x8_idx, dp, &min, &max);
@@ -542,7 +542,7 @@ static int compute_minmax_8x8(const uint8_t *s, int sp, const uint8_t *d,
 
 static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
                                  int dp, int x8_idx, int y8_idx, v8x8 *vst,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                  int highbd_flag,
 #endif
                                  int pixels_wide, int pixels_high,
@@ -556,7 +556,7 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
     if (x4_idx < pixels_wide && y4_idx < pixels_high) {
       int s_avg;
       int d_avg = 128;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
         s_avg = aom_highbd_avg_4x4(s + y4_idx * sp + x4_idx, sp);
         if (!is_key_frame)
@@ -578,7 +578,7 @@ static void fill_variance_4x4avg(const uint8_t *s, int sp, const uint8_t *d,
 
 static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
                                  int dp, int x16_idx, int y16_idx, v16x16 *vst,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                  int highbd_flag,
 #endif
                                  int pixels_wide, int pixels_high,
@@ -592,7 +592,7 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
     if (x8_idx < pixels_wide && y8_idx < pixels_high) {
       int s_avg;
       int d_avg = 128;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (highbd_flag & YV12_FLAG_HIGHBITDEPTH) {
         s_avg = aom_highbd_avg_8x8(s + y8_idx * sp + x8_idx, sp);
         if (!is_key_frame)
@@ -614,9 +614,9 @@ static void fill_variance_8x8avg(const uint8_t *s, int sp, const uint8_t *d,
 
 // This function chooses partitioning based on the variance between source and
 // reconstructed last, where variance is computed for down-sampled inputs.
-static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+static int choose_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
                                MACROBLOCK *x, int mi_row, int mi_col) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   int i, j, k, m;
   v64x64 vt;
@@ -643,7 +643,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
     segment_id = get_segment_id(cm, map, BLOCK_64X64, mi_row, mi_col);
 
     if (cyclic_refresh_segment_id_boosted(segment_id)) {
-      int q = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+      int q = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
       set_vbp_thresholds(cpi, thresholds, q);
     }
   }
@@ -670,7 +670,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
     yv12_g = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
 
     if (yv12_g && yv12_g != yv12) {
-      vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+      av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                             &cm->frame_refs[GOLDEN_FRAME - 1].sf);
       y_sad_g = cpi->fn_ptr[bsize].sdf(
           x->plane[0].src.buf, x->plane[0].src.stride, xd->plane[0].pre[0].buf,
@@ -679,7 +679,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
       y_sad_g = UINT_MAX;
     }
 
-    vp10_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
+    av1_setup_pre_planes(xd, 0, yv12, mi_row, mi_col,
                           &cm->frame_refs[LAST_FRAME - 1].sf);
     mbmi->ref_frame[0] = LAST_FRAME;
     mbmi->ref_frame[1] = NONE;
@@ -687,9 +687,9 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
     mbmi->mv[0].as_int = 0;
     mbmi->interp_filter = BILINEAR;
 
-    y_sad = vp10_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
+    y_sad = av1_int_pro_motion_estimation(cpi, x, bsize, mi_row, mi_col);
     if (y_sad_g < y_sad) {
-      vp10_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
+      av1_setup_pre_planes(xd, 0, yv12_g, mi_row, mi_col,
                             &cm->frame_refs[GOLDEN_FRAME - 1].sf);
       mbmi->ref_frame[0] = GOLDEN_FRAME;
       mbmi->mv[0].as_int = 0;
@@ -698,7 +698,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
       x->pred_mv[LAST_FRAME] = mbmi->mv[0].as_mv;
     }
 
-    vp10_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
+    av1_build_inter_predictors_sb(xd, mi_row, mi_col, BLOCK_64X64);
 
     for (i = 1; i <= 2; ++i) {
       struct macroblock_plane *p = &x->plane[i];
@@ -731,7 +731,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
   } else {
     d = VP9_VAR_OFFS;
     dp = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       switch (xd->bd) {
         case 10: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_10); break;
@@ -740,7 +740,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
         default: d = CONVERT_TO_BYTEPTR(VP9_HIGH_VAR_OFFS_8); break;
       }
     }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   // Index for force_split: 0 for 64x64, 1-4 for 32x32 blocks,
@@ -762,7 +762,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
       variance4x4downsample[i2 + j] = 0;
       if (!is_key_frame) {
         fill_variance_8x8avg(s, sp, d, dp, x16_idx, y16_idx, vst,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                              xd->cur_buf->flags,
 #endif
                              pixels_wide, pixels_high, is_key_frame);
@@ -781,7 +781,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
           // compute the minmax over the 8x8 sub-blocks, and if above threshold,
           // force split to 8x8 block for this 16x16 block.
           int minmax = compute_minmax_8x8(s, sp, d, dp, x16_idx, y16_idx,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                           xd->cur_buf->flags,
 #endif
                                           pixels_wide, pixels_high);
@@ -803,7 +803,7 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
           int y8_idx = y16_idx + ((k >> 1) << 3);
           v8x8 *vst2 = is_key_frame ? &vst->split[k] : &vt2[i2 + j].split[k];
           fill_variance_4x4avg(s, sp, d, dp, x8_idx, y8_idx, vst2,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                xd->cur_buf->flags,
 #endif
                                pixels_wide, pixels_high, is_key_frame);
@@ -891,11 +891,11 @@ static int choose_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
   return 0;
 }
 
-static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
+static void update_state(AV1_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
                          int mi_row, int mi_col, BLOCK_SIZE bsize,
                          int output_enabled) {
   int i, x_idx, y;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RD_COUNTS *const rdc = &td->rd_counts;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -933,7 +933,7 @@ static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
     // Else for cyclic refresh mode update the segment map, set the segment id
     // and then update the quantizer.
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
-      vp10_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
+      av1_cyclic_refresh_update_segment(cpi, &xd->mi[0]->mbmi, mi_row, mi_col,
                                          bsize, ctx->rate, ctx->dist, x->skip);
     }
   }
@@ -964,7 +964,7 @@ static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
         xd->mi[x_idx + y * mis] = mi_addr;
       }
 
-  if (cpi->oxcf.aq_mode) vp10_init_plane_quantizers(cpi, x);
+  if (cpi->oxcf.aq_mode) av1_init_plane_quantizers(cpi, x);
 
   if (is_inter_block(mbmi) && mbmi->sb_type < BLOCK_8X8) {
     mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
@@ -994,10 +994,10 @@ static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
 #endif
   if (!frame_is_intra_only(cm)) {
     if (is_inter_block(mbmi)) {
-      vp10_update_mv_count(td);
+      av1_update_mv_count(td);
 
       if (cm->interp_filter == SWITCHABLE) {
-        const int ctx = vp10_get_pred_context_switchable_interp(xd);
+        const int ctx = av1_get_pred_context_switchable_interp(xd);
         ++td->counts->switchable_interp[ctx][mbmi->interp_filter];
       }
     }
@@ -1022,7 +1022,7 @@ static void update_state(VP10_COMP *cpi, ThreadData *td, PICK_MODE_CONTEXT *ctx,
   }
 }
 
-void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
+void av1_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                            int mi_row, int mi_col) {
   uint8_t *const buffers[3] = { src->y_buffer, src->u_buffer, src->v_buffer };
   const int strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
@@ -1037,21 +1037,21 @@ void vp10_setup_src_planes(MACROBLOCK *x, const YV12_BUFFER_CONFIG *src,
                      x->e_mbd.plane[i].subsampling_y);
 }
 
-static int set_segment_rdmult(VP10_COMP *const cpi, MACROBLOCK *const x,
+static int set_segment_rdmult(AV1_COMP *const cpi, MACROBLOCK *const x,
                               int8_t segment_id) {
   int segment_qindex;
-  VP10_COMMON *const cm = &cpi->common;
-  vp10_init_plane_quantizers(cpi, x);
+  AV1_COMMON *const cm = &cpi->common;
+  av1_init_plane_quantizers(cpi, x);
   aom_clear_system_state();
-  segment_qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
-  return vp10_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
+  segment_qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+  return av1_compute_rd_mult(cpi, segment_qindex + cm->y_dc_delta_q);
 }
 
-static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
+static void rd_pick_sb_modes(AV1_COMP *cpi, TileDataEnc *tile_data,
                              MACROBLOCK *const x, int mi_row, int mi_col,
                              RD_COST *rd_cost, BLOCK_SIZE bsize,
                              PICK_MODE_CONTEXT *ctx, int64_t best_rd) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *mbmi;
@@ -1086,28 +1086,28 @@ static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
   // Set to zero to make sure we do not use the previous encoded frame stats
   mbmi->skip = 0;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    x->source_variance = vp10_high_get_sby_perpixel_variance(
+    x->source_variance = av1_high_get_sby_perpixel_variance(
         cpi, &x->plane[0].src, bsize, xd->bd);
   } else {
     x->source_variance =
-        vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+        av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
   }
 #else
   x->source_variance =
-      vp10_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+      av1_get_sby_perpixel_variance(cpi, &x->plane[0].src, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Save rdmult before it might be changed, so it can be restored later.
   orig_rdmult = x->rdmult;
 
   if (aq_mode == VARIANCE_AQ) {
     const int energy =
-        bsize <= BLOCK_16X16 ? x->mb_energy : vp10_block_energy(cpi, x, bsize);
+        bsize <= BLOCK_16X16 ? x->mb_energy : av1_block_energy(cpi, x, bsize);
     if (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
         (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref)) {
-      mbmi->segment_id = vp10_vaq_segment_id(energy);
+      mbmi->segment_id = av1_vaq_segment_id(energy);
     } else {
       const uint8_t *const map =
           cm->seg.update_map ? cpi->segmentation_map : cm->last_frame_seg_map;
@@ -1122,23 +1122,23 @@ static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
     // If segment is boosted, use rdmult for that segment.
     if (cyclic_refresh_segment_id_boosted(
             get_segment_id(cm, map, bsize, mi_row, mi_col)))
-      x->rdmult = vp10_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
+      x->rdmult = av1_cyclic_refresh_get_rdmult(cpi->cyclic_refresh);
   }
 
   // Find best coding mode & reconstruct the MB so it is available
   // as a predictor for MBs that follow in the SB
   if (frame_is_intra_only(cm)) {
-    vp10_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
+    av1_rd_pick_intra_mode_sb(cpi, x, rd_cost, bsize, ctx, best_rd);
   } else {
     if (bsize >= BLOCK_8X8) {
       if (segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP))
-        vp10_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
+        av1_rd_pick_inter_mode_sb_seg_skip(cpi, tile_data, x, rd_cost, bsize,
                                             ctx, best_rd);
       else
-        vp10_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+        av1_rd_pick_inter_mode_sb(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                                    bsize, ctx, best_rd);
     } else {
-      vp10_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
+      av1_rd_pick_inter_mode_sub8x8(cpi, tile_data, x, mi_row, mi_col, rd_cost,
                                      bsize, ctx, best_rd);
     }
   }
@@ -1148,7 +1148,7 @@ static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
       (bsize >= BLOCK_16X16) &&
       (cm->frame_type == KEY_FRAME || cpi->refresh_alt_ref_frame ||
        (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref))) {
-    vp10_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
+    av1_caq_select_segment(cpi, x, bsize, mi_row, mi_col, rd_cost->rate);
   }
 
   x->rdmult = orig_rdmult;
@@ -1161,7 +1161,7 @@ static void rd_pick_sb_modes(VP10_COMP *cpi, TileDataEnc *tile_data,
   ctx->dist = rd_cost->dist;
 }
 
-static void update_stats(VP10_COMMON *cm, ThreadData *td) {
+static void update_stats(AV1_COMMON *cm, ThreadData *td) {
   const MACROBLOCK *x = &td->mb;
   const MACROBLOCKD *const xd = &x->e_mbd;
   const MODE_INFO *const mi = xd->mi[0];
@@ -1175,24 +1175,24 @@ static void update_stats(VP10_COMMON *cm, ThreadData *td) {
     const int seg_ref_active =
         segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_REF_FRAME);
     if (!seg_ref_active) {
-      counts->intra_inter[vp10_get_intra_inter_context(xd)][inter_block]++;
+      counts->intra_inter[av1_get_intra_inter_context(xd)][inter_block]++;
       // If the segment reference feature is enabled we have only a single
       // reference frame allowed for the segment so exclude it from
       // the reference frame counts used to work out probabilities.
       if (inter_block) {
         const MV_REFERENCE_FRAME ref0 = mbmi->ref_frame[0];
         if (cm->reference_mode == REFERENCE_MODE_SELECT)
-          counts->comp_inter[vp10_get_reference_mode_context(
+          counts->comp_inter[av1_get_reference_mode_context(
               cm, xd)][has_second_ref(mbmi)]++;
 
         if (has_second_ref(mbmi)) {
-          counts->comp_ref[vp10_get_pred_context_comp_ref_p(
+          counts->comp_ref[av1_get_pred_context_comp_ref_p(
               cm, xd)][ref0 == GOLDEN_FRAME]++;
         } else {
-          counts->single_ref[vp10_get_pred_context_single_ref_p1(
+          counts->single_ref[av1_get_pred_context_single_ref_p1(
               xd)][0][ref0 != LAST_FRAME]++;
           if (ref0 != LAST_FRAME)
-            counts->single_ref[vp10_get_pred_context_single_ref_p2(
+            counts->single_ref[av1_get_pred_context_single_ref_p2(
                 xd)][1][ref0 != GOLDEN_FRAME]++;
         }
       }
@@ -1277,7 +1277,7 @@ static void save_context(MACROBLOCK *const x, int mi_row, int mi_col,
          sizeof(xd->left_seg_context[0]) * mi_height);
 }
 
-static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
+static void encode_b(AV1_COMP *cpi, const TileInfo *const tile, ThreadData *td,
                      TOKENEXTRA **tp, int mi_row, int mi_col,
                      int output_enabled, BLOCK_SIZE bsize,
                      PICK_MODE_CONTEXT *ctx) {
@@ -1291,11 +1291,11 @@ static void encode_b(VP10_COMP *cpi, const TileInfo *const tile, ThreadData *td,
   }
 }
 
-static void encode_sb(VP10_COMP *cpi, ThreadData *td,
+static void encode_sb(AV1_COMP *cpi, ThreadData *td,
                       const TileInfo *const tile, TOKENEXTRA **tp, int mi_row,
                       int mi_col, int output_enabled, BLOCK_SIZE bsize,
                       PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
 
@@ -1402,10 +1402,10 @@ static void set_partial_b64x64_partition(MODE_INFO *mi, int mis, int bh_in,
 // However, at the bottom and right borders of the image the requested size
 // may not be allowed in which case this code attempts to choose the largest
 // allowable partition.
-static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
+static void set_fixed_partitioning(AV1_COMP *cpi, const TileInfo *const tile,
                                    MODE_INFO **mi_8x8, int mi_row, int mi_col,
                                    BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int mis = cm->mi_stride;
   const int row8x8_remaining = tile->mi_row_end - mi_row;
   const int col8x8_remaining = tile->mi_col_end - mi_col;
@@ -1433,12 +1433,12 @@ static void set_fixed_partitioning(VP10_COMP *cpi, const TileInfo *const tile,
   }
 }
 
-static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_use_partition(AV1_COMP *cpi, ThreadData *td,
                              TileDataEnc *tile_data, MODE_INFO **mi_8x8,
                              TOKENEXTRA **tp, int mi_row, int mi_col,
                              BLOCK_SIZE bsize, int *rate, int64_t *dist,
                              int do_recon, PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1463,9 +1463,9 @@ static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
   assert(num_4x4_blocks_wide_lookup[bsize] ==
          num_4x4_blocks_high_lookup[bsize]);
 
-  vp10_rd_cost_reset(&last_part_rdc);
-  vp10_rd_cost_reset(&none_rdc);
-  vp10_rd_cost_reset(&chosen_rdc);
+  av1_rd_cost_reset(&last_part_rdc);
+  av1_rd_cost_reset(&none_rdc);
+  av1_rd_cost_reset(&chosen_rdc);
 
   partition = partition_lookup[bsl][bs_type];
   subsize = get_subsize(bsize, partition);
@@ -1475,7 +1475,7 @@ static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
 
   if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode) {
     set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
-    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+    x->mb_energy = av1_block_energy(cpi, x, bsize);
   }
 
   if (do_partition_search &&
@@ -1529,13 +1529,13 @@ static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
           mi_row + (mi_step >> 1) < cm->mi_rows) {
         RD_COST tmp_rdc;
         PICK_MODE_CONTEXT *ctx = &pc_tree->horizontal[0];
-        vp10_rd_cost_init(&tmp_rdc);
+        av1_rd_cost_init(&tmp_rdc);
         update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
         encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
         rd_pick_sb_modes(cpi, tile_data, x, mi_row + (mi_step >> 1), mi_col,
                          &tmp_rdc, subsize, &pc_tree->horizontal[1], INT64_MAX);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-          vp10_rd_cost_reset(&last_part_rdc);
+          av1_rd_cost_reset(&last_part_rdc);
           break;
         }
         last_part_rdc.rate += tmp_rdc.rate;
@@ -1550,14 +1550,14 @@ static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
           mi_col + (mi_step >> 1) < cm->mi_cols) {
         RD_COST tmp_rdc;
         PICK_MODE_CONTEXT *ctx = &pc_tree->vertical[0];
-        vp10_rd_cost_init(&tmp_rdc);
+        av1_rd_cost_init(&tmp_rdc);
         update_state(cpi, td, ctx, mi_row, mi_col, subsize, 0);
         encode_superblock(cpi, td, tp, 0, mi_row, mi_col, subsize, ctx);
         rd_pick_sb_modes(cpi, tile_data, x, mi_row, mi_col + (mi_step >> 1),
                          &tmp_rdc, subsize,
                          &pc_tree->vertical[bsize > BLOCK_8X8], INT64_MAX);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-          vp10_rd_cost_reset(&last_part_rdc);
+          av1_rd_cost_reset(&last_part_rdc);
           break;
         }
         last_part_rdc.rate += tmp_rdc.rate;
@@ -1582,13 +1582,13 @@ static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
         if ((mi_row + y_idx >= cm->mi_rows) || (mi_col + x_idx >= cm->mi_cols))
           continue;
 
-        vp10_rd_cost_init(&tmp_rdc);
+        av1_rd_cost_init(&tmp_rdc);
         rd_use_partition(cpi, td, tile_data, mi_8x8 + jj * bss * mis + ii * bss,
                          tp, mi_row + y_idx, mi_col + x_idx, subsize,
                          &tmp_rdc.rate, &tmp_rdc.dist, i != 3,
                          pc_tree->split[i]);
         if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-          vp10_rd_cost_reset(&last_part_rdc);
+          av1_rd_cost_reset(&last_part_rdc);
           break;
         }
         last_part_rdc.rate += tmp_rdc.rate;
@@ -1638,7 +1638,7 @@ static void rd_use_partition(VP10_COMP *cpi, ThreadData *td,
       restore_context(x, mi_row, mi_col, a, l, sa, sl, bsize);
 
       if (tmp_rdc.rate == INT_MAX || tmp_rdc.dist == INT64_MAX) {
-        vp10_rd_cost_reset(&chosen_rdc);
+        av1_rd_cost_reset(&chosen_rdc);
         break;
       }
 
@@ -1740,11 +1740,11 @@ static const BLOCK_SIZE next_square_size[BLOCK_SIZES] = {
 
 // Look at neighboring blocks and set a min and max partition size based on
 // what they chose.
-static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
+static void rd_auto_partition_range(AV1_COMP *cpi, const TileInfo *const tile,
                                     MACROBLOCKD *const xd, int mi_row,
                                     int mi_col, BLOCK_SIZE *min_block_size,
                                     BLOCK_SIZE *max_block_size) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MODE_INFO **mi = xd->mi;
   const int left_in_image = xd->left_available && mi[-1];
   const int above_in_image = xd->up_available && mi[-xd->mi_stride];
@@ -1795,7 +1795,7 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
   // Test for blocks at the edge of the active image.
   // This may be the actual edge of the image or where there are formatting
   // bars.
-  if (vp10_active_edge_sb(cpi, mi_row, mi_col)) {
+  if (av1_active_edge_sb(cpi, mi_row, mi_col)) {
     min_size = BLOCK_4X4;
   } else {
     min_size =
@@ -1815,7 +1815,7 @@ static void rd_auto_partition_range(VP10_COMP *cpi, const TileInfo *const tile,
 }
 
 // TODO(jingning) refactor functions setting partition search range
-static void set_partition_range(VP10_COMMON *cm, MACROBLOCKD *xd, int mi_row,
+static void set_partition_range(AV1_COMMON *cm, MACROBLOCKD *xd, int mi_row,
                                 int mi_col, BLOCK_SIZE bsize,
                                 BLOCK_SIZE *min_bs, BLOCK_SIZE *max_bs) {
   int mi_width = num_8x8_blocks_wide_lookup[bsize];
@@ -1927,12 +1927,12 @@ static INLINE int get_motion_inconsistency(MOTION_DIRECTION this_mv,
 // TODO(jingning,jimbankoski,rbultje): properly skip partition types that are
 // unlikely to be selected depending on previous rate-distortion optimization
 // results, for encoding speed-up.
-static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
+static void rd_pick_partition(AV1_COMP *cpi, ThreadData *td,
                               TileDataEnc *tile_data, TOKENEXTRA **tp,
                               int mi_row, int mi_col, BLOCK_SIZE bsize,
                               RD_COST *rd_cost, int64_t best_rd,
                               PC_TREE *pc_tree) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1971,15 +1971,15 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   assert(num_8x8_blocks_wide_lookup[bsize] ==
          num_8x8_blocks_high_lookup[bsize]);
 
-  vp10_rd_cost_init(&this_rdc);
-  vp10_rd_cost_init(&sum_rdc);
-  vp10_rd_cost_reset(&best_rdc);
+  av1_rd_cost_init(&this_rdc);
+  av1_rd_cost_init(&sum_rdc);
+  av1_rd_cost_reset(&best_rdc);
   best_rdc.rdcost = best_rd;
 
   set_offsets(cpi, tile_info, x, mi_row, mi_col, bsize);
 
   if (bsize == BLOCK_16X16 && cpi->oxcf.aq_mode)
-    x->mb_energy = vp10_block_energy(cpi, x, bsize);
+    x->mb_energy = av1_block_energy(cpi, x, bsize);
 
   if (cpi->sf.cb_partition_search && bsize == BLOCK_16X16) {
     int cb_partition_search_ctrl =
@@ -2215,7 +2215,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
 
   // PARTITION_HORZ
   if (partition_horz_allowed &&
-      (do_rect || vp10_active_h_edge(cpi, mi_row, mi_step))) {
+      (do_rect || av1_active_h_edge(cpi, mi_row, mi_step))) {
     subsize = get_subsize(bsize, PARTITION_HORZ);
     if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
     if (cpi->sf.adaptive_pred_interp_filter && bsize == BLOCK_8X8 &&
@@ -2259,7 +2259,7 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   }
   // PARTITION_VERT
   if (partition_vert_allowed &&
-      (do_rect || vp10_active_v_edge(cpi, mi_col, mi_step))) {
+      (do_rect || av1_active_v_edge(cpi, mi_col, mi_step))) {
     subsize = get_subsize(bsize, PARTITION_VERT);
 
     if (cpi->sf.adaptive_motion_search) load_pred_mv(x, ctx);
@@ -2325,10 +2325,10 @@ static void rd_pick_partition(VP10_COMP *cpi, ThreadData *td,
   }
 }
 
-static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
+static void encode_rd_sb_row(AV1_COMP *cpi, ThreadData *td,
                              TileDataEnc *tile_data, int mi_row,
                              TOKENEXTRA **tp) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   TileInfo *const tile_info = &tile_data->tile_info;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -2363,7 +2363,7 @@ static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
       }
     }
 
-    vp10_zero(x->pred_mv);
+    av1_zero(x->pred_mv);
     td->pc_root->index = 0;
 
     if (seg->enabled) {
@@ -2406,16 +2406,16 @@ static void encode_rd_sb_row(VP10_COMP *cpi, ThreadData *td,
   }
 }
 
-static void init_encode_frame_mb_context(VP10_COMP *cpi) {
+static void init_encode_frame_mb_context(AV1_COMP *cpi) {
   MACROBLOCK *const x = &cpi->td.mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   const int aligned_mi_cols = mi_cols_aligned_to_sb(cm->mi_cols);
 
   // Copy data over into macro block data structures.
-  vp10_setup_src_planes(x, cpi->Source, 0, 0);
+  av1_setup_src_planes(x, cpi->Source, 0, 0);
 
-  vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+  av1_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
 
   // Note: this memset assumes above_context[0], [1] and [2]
   // are allocated as part of the same buffer.
@@ -2425,7 +2425,7 @@ static void init_encode_frame_mb_context(VP10_COMP *cpi) {
          sizeof(*xd->above_seg_context) * aligned_mi_cols);
 }
 
-static int check_dual_ref_flags(VP10_COMP *cpi) {
+static int check_dual_ref_flags(AV1_COMP *cpi) {
   const int ref_flags = cpi->ref_frame_flags;
 
   if (segfeature_active(&cpi->common.seg, 1, SEG_LVL_REF_FRAME)) {
@@ -2436,7 +2436,7 @@ static int check_dual_ref_flags(VP10_COMP *cpi) {
   }
 }
 
-static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) {
+static void reset_skip_tx_size(AV1_COMMON *cm, TX_SIZE max_tx_size) {
   int mi_row, mi_col;
   const int mis = cm->mi_stride;
   MODE_INFO **mi_ptr = cm->mi_grid_visible;
@@ -2449,7 +2449,7 @@ static void reset_skip_tx_size(VP10_COMMON *cm, TX_SIZE max_tx_size) {
   }
 }
 
-static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
+static MV_REFERENCE_FRAME get_frame_type(const AV1_COMP *cpi) {
   if (frame_is_intra_only(&cpi->common))
     return INTRA_FRAME;
   else if (cpi->rc.is_src_frame_alt_ref && cpi->refresh_golden_frame)
@@ -2460,7 +2460,7 @@ static MV_REFERENCE_FRAME get_frame_type(const VP10_COMP *cpi) {
     return LAST_FRAME;
 }
 
-static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
+static TX_MODE select_tx_mode(const AV1_COMP *cpi, MACROBLOCKD *const xd) {
   if (xd->lossless[0]) return ONLY_4X4;
   if (cpi->sf.tx_size_search_method == USE_LARGESTALL)
     return ALLOW_32X32;
@@ -2471,8 +2471,8 @@ static TX_MODE select_tx_mode(const VP10_COMP *cpi, MACROBLOCKD *const xd) {
     return cpi->common.tx_mode;
 }
 
-void vp10_init_tile_data(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_init_tile_data(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
   const int tile_rows = 1 << cm->log2_tile_rows;
   int tile_col, tile_row;
@@ -2503,7 +2503,7 @@ void vp10_init_tile_data(VP10_COMP *cpi) {
     for (tile_col = 0; tile_col < tile_cols; ++tile_col) {
       TileInfo *tile_info =
           &cpi->tile_data[tile_row * tile_cols + tile_col].tile_info;
-      vp10_tile_init(tile_info, cm, tile_row, tile_col);
+      av1_tile_init(tile_info, cm, tile_row, tile_col);
 
       cpi->tile_tok[tile_row][tile_col] = pre_tok + tile_tok;
       pre_tok = cpi->tile_tok[tile_row][tile_col];
@@ -2512,9 +2512,9 @@ void vp10_init_tile_data(VP10_COMP *cpi) {
   }
 }
 
-void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
+void av1_encode_tile(AV1_COMP *cpi, ThreadData *td, int tile_row,
                       int tile_col) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
   TileDataEnc *this_tile = &cpi->tile_data[tile_row * tile_cols + tile_col];
   const TileInfo *const tile_info = &this_tile->tile_info;
@@ -2535,22 +2535,22 @@ void vp10_encode_tile(VP10_COMP *cpi, ThreadData *td, int tile_row,
          allocated_tokens(*tile_info));
 }
 
-static void encode_tiles(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void encode_tiles(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
   const int tile_rows = 1 << cm->log2_tile_rows;
   int tile_col, tile_row;
 
-  vp10_init_tile_data(cpi);
+  av1_init_tile_data(cpi);
 
   for (tile_row = 0; tile_row < tile_rows; ++tile_row)
     for (tile_col = 0; tile_col < tile_cols; ++tile_col)
-      vp10_encode_tile(cpi, &cpi->td, tile_row, tile_col);
+      av1_encode_tile(cpi, &cpi->td, tile_row, tile_col);
 }
 
 #if CONFIG_FP_MB_STATS
 static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
-                            VP10_COMMON *cm, uint8_t **this_frame_mb_stats) {
+                            AV1_COMMON *cm, uint8_t **this_frame_mb_stats) {
   uint8_t *mb_stats_in = firstpass_mb_stats->mb_stats_start +
                          cm->current_video_frame * cm->MBs * sizeof(uint8_t);
 
@@ -2562,10 +2562,10 @@ static int input_fpmb_stats(FIRSTPASS_MB_STATS *firstpass_mb_stats,
 }
 #endif
 
-static void encode_frame_internal(VP10_COMP *cpi) {
+static void encode_frame_internal(AV1_COMP *cpi) {
   ThreadData *const td = &cpi->td;
   MACROBLOCK *const x = &td->mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   RD_COUNTS *const rdc = &cpi->td.rd_counts;
   int i;
@@ -2573,16 +2573,16 @@ static void encode_frame_internal(VP10_COMP *cpi) {
   xd->mi = cm->mi_grid_visible;
   xd->mi[0] = cm->mi;
 
-  vp10_zero(*td->counts);
-  vp10_zero(rdc->coef_counts);
-  vp10_zero(rdc->comp_pred_diff);
-  vp10_zero(rdc->filter_diff);
+  av1_zero(*td->counts);
+  av1_zero(rdc->coef_counts);
+  av1_zero(rdc->comp_pred_diff);
+  av1_zero(rdc->filter_diff);
   rdc->m_search_count = 0;   // Count of motion search hits.
   rdc->ex_search_count = 0;  // Exhaustive mesh search hits.
 
   for (i = 0; i < MAX_SEGMENTS; ++i) {
     const int qindex = CONFIG_MISC_FIXES && cm->seg.enabled
-                           ? vp10_get_qindex(&cm->seg, i, cm->base_qindex)
+                           ? av1_get_qindex(&cm->seg, i, cm->base_qindex)
                            : cm->base_qindex;
     xd->lossless[i] = qindex == 0 && cm->y_dc_delta_q == 0 &&
                       cm->uv_dc_delta_q == 0 && cm->uv_ac_delta_q == 0;
@@ -2592,10 +2592,10 @@ static void encode_frame_internal(VP10_COMP *cpi) {
 
   cm->tx_mode = select_tx_mode(cpi, xd);
 
-  vp10_frame_init_quantizer(cpi);
+  av1_frame_init_quantizer(cpi);
 
-  vp10_initialize_rd_consts(cpi);
-  vp10_initialize_me_consts(cpi, x, cm->base_qindex);
+  av1_initialize_rd_consts(cpi);
+  av1_initialize_me_consts(cpi, x, cm->base_qindex);
   init_encode_frame_mb_context(cpi);
   cm->use_prev_frame_mvs =
       !cm->error_resilient_mode && cm->width == cm->last_width &&
@@ -2606,7 +2606,7 @@ static void encode_frame_internal(VP10_COMP *cpi) {
       cm->use_prev_frame_mvs ? cm->prev_mip + cm->mi_stride + 1 : NULL;
 
   x->quant_fp = cpi->sf.use_quant_fp;
-  vp10_zero(x->skip_txfm);
+  av1_zero(x->skip_txfm);
 
   {
     struct aom_usec_timer emr_timer;
@@ -2621,7 +2621,7 @@ static void encode_frame_internal(VP10_COMP *cpi) {
 
     // If allowed, encoding tiles in parallel with one thread handling one tile.
     if (VPXMIN(cpi->oxcf.max_threads, 1 << cm->log2_tile_cols) > 1)
-      vp10_encode_tiles_mt(cpi);
+      av1_encode_tiles_mt(cpi);
     else
       encode_tiles(cpi);
 
@@ -2651,8 +2651,8 @@ static INTERP_FILTER get_interp_filter(
   }
 }
 
-void vp10_encode_frame(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_encode_frame(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
   // In the longer term the encoder should be generalized to match the
   // decoder such that we allow compound where one of the 3 buffers has a
@@ -2728,10 +2728,10 @@ void vp10_encode_frame(VP10_COMP *cpi) {
 
       if (comp_count_zero == 0) {
         cm->reference_mode = SINGLE_REFERENCE;
-        vp10_zero(counts->comp_inter);
+        av1_zero(counts->comp_inter);
       } else if (single_count_zero == 0) {
         cm->reference_mode = COMPOUND_REFERENCE;
-        vp10_zero(counts->comp_inter);
+        av1_zero(counts->comp_inter);
       }
     }
 
@@ -2791,8 +2791,8 @@ static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi,
         const int bidx = idy * 2 + idx;
         const PREDICTION_MODE bmode = mi->bmi[bidx].as_mode;
         if (intraonly) {
-          const PREDICTION_MODE a = vp10_above_block_mode(mi, above_mi, bidx);
-          const PREDICTION_MODE l = vp10_left_block_mode(mi, left_mi, bidx);
+          const PREDICTION_MODE a = av1_above_block_mode(mi, above_mi, bidx);
+          const PREDICTION_MODE l = av1_left_block_mode(mi, left_mi, bidx);
           ++counts->kf_y_mode[a][l][bmode];
         } else {
           ++counts->y_mode[0][bmode];
@@ -2800,8 +2800,8 @@ static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi,
       }
   } else {
     if (intraonly) {
-      const PREDICTION_MODE above = vp10_above_block_mode(mi, above_mi, 0);
-      const PREDICTION_MODE left = vp10_left_block_mode(mi, left_mi, 0);
+      const PREDICTION_MODE above = av1_above_block_mode(mi, above_mi, 0);
+      const PREDICTION_MODE left = av1_left_block_mode(mi, left_mi, 0);
       ++counts->kf_y_mode[above][left][y_mode];
     } else {
       ++counts->y_mode[size_group_lookup[bsize]][y_mode];
@@ -2811,10 +2811,10 @@ static void sum_intra_stats(FRAME_COUNTS *counts, const MODE_INFO *mi,
   ++counts->uv_mode[y_mode][uv_mode];
 }
 
-static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+static void encode_superblock(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                               int output_enabled, int mi_row, int mi_col,
                               BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO **mi_8x8 = xd->mi;
@@ -2841,11 +2841,11 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
     int plane;
     mbmi->skip = 1;
     for (plane = 0; plane < MAX_MB_PLANE; ++plane)
-      vp10_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane);
+      av1_encode_intra_block_plane(x, VPXMAX(bsize, BLOCK_8X8), plane);
     if (output_enabled)
       sum_intra_stats(td->counts, mi, xd->above_mi, xd->left_mi,
                       frame_is_intra_only(cm));
-    vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+    av1_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
   } else {
     int ref;
     const int is_compound = has_second_ref(mbmi);
@@ -2853,18 +2853,18 @@ static void encode_superblock(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
     for (ref = 0; ref < 1 + is_compound; ++ref) {
       YV12_BUFFER_CONFIG *cfg = get_ref_frame_buffer(cpi, mbmi->ref_frame[ref]);
       assert(cfg != NULL);
-      vp10_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
+      av1_setup_pre_planes(xd, ref, cfg, mi_row, mi_col,
                             &xd->block_refs[ref]->sf);
     }
     if (!(cpi->sf.reuse_inter_pred_sby && ctx->pred_pixel_ready) || seg_skip)
-      vp10_build_inter_predictors_sby(xd, mi_row, mi_col,
+      av1_build_inter_predictors_sby(xd, mi_row, mi_col,
                                       VPXMAX(bsize, BLOCK_8X8));
 
-    vp10_build_inter_predictors_sbuv(xd, mi_row, mi_col,
+    av1_build_inter_predictors_sbuv(xd, mi_row, mi_col,
                                      VPXMAX(bsize, BLOCK_8X8));
 
-    vp10_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
-    vp10_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
+    av1_encode_sb(x, VPXMAX(bsize, BLOCK_8X8));
+    av1_tokenize_sb(cpi, td, t, !output_enabled, VPXMAX(bsize, BLOCK_8X8));
   }
 
   if (output_enabled) {
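/*
 * Illustrative, standalone sketch (not part of the patch): the renamed tile
 * entry points above (av1_init_tile_data, av1_encode_tile) and the static
 * encode_tiles() helper all address the flat cpi->tile_data array as
 * tile_row * tile_cols + tile_col, with the tile counts derived from the
 * log2_tile_cols / log2_tile_rows fields of AV1_COMMON. The toy program below
 * reproduces only that indexing so the layout is easy to see; the field
 * values are made-up example numbers, not anything taken from the encoder.
 */
#include <stdio.h>

int main(void) {
  const int log2_tile_cols = 2, log2_tile_rows = 1; /* example: a 4x2 tile grid */
  const int tile_cols = 1 << log2_tile_cols;
  const int tile_rows = 1 << log2_tile_rows;
  int tile_row, tile_col;

  for (tile_row = 0; tile_row < tile_rows; ++tile_row)
    for (tile_col = 0; tile_col < tile_cols; ++tile_col)
      printf("tile (%d,%d) -> tile_data[%d]\n", tile_row, tile_col,
             tile_row * tile_cols + tile_col);
  return 0;
}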
diff --git a/av1/encoder/encodeframe.h b/av1/encoder/encodeframe.h
index 2b70d730692be4b22fdb2b140627107758bc8fcb..59936db44019e6e8930056bc22d9c79b4f68b4f6 100644
--- a/av1/encoder/encodeframe.h
+++ b/av1/encoder/encodeframe.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_ENCODEFRAME_H_
-#define VP10_ENCODER_ENCODEFRAME_H_
+#ifndef AV1_ENCODER_ENCODEFRAME_H_
+#define AV1_ENCODER_ENCODEFRAME_H_
 
 #include "aom/aom_integer.h"
 
@@ -20,7 +20,7 @@ extern "C" {
 
 struct macroblock;
 struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
 struct ThreadData;
 
 // Constants used in SOURCE_VAR_BASED_PARTITION
@@ -30,20 +30,20 @@ struct ThreadData;
 #define VAR_HIST_LARGE_CUT_OFF 75
 #define VAR_HIST_SMALL_CUT_OFF 45
 
-void vp10_setup_src_planes(struct macroblock *x,
+void av1_setup_src_planes(struct macroblock *x,
                            const struct yv12_buffer_config *src, int mi_row,
                            int mi_col);
 
-void vp10_encode_frame(struct VP10_COMP *cpi);
+void av1_encode_frame(struct AV1_COMP *cpi);
 
-void vp10_init_tile_data(struct VP10_COMP *cpi);
-void vp10_encode_tile(struct VP10_COMP *cpi, struct ThreadData *td,
+void av1_init_tile_data(struct AV1_COMP *cpi);
+void av1_encode_tile(struct AV1_COMP *cpi, struct ThreadData *td,
                       int tile_row, int tile_col);
 
-void vp10_set_variance_partition_thresholds(struct VP10_COMP *cpi, int q);
+void av1_set_variance_partition_thresholds(struct AV1_COMP *cpi, int q);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODEFRAME_H_
+#endif  // AV1_ENCODER_ENCODEFRAME_H_
diff --git a/av1/encoder/encodemb.c b/av1/encoder/encodemb.c
index a7d5f50dd40a7f51a312af0efb13b17dd8f3b170..c2d4ae867cca6753478a7c95c37790923771a545 100644
--- a/av1/encoder/encodemb.c
+++ b/av1/encoder/encodemb.c
@@ -31,21 +31,21 @@ struct optimize_ctx {
   ENTROPY_CONTEXT tl[MAX_MB_PLANE][16];
 };
 
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   struct macroblock_plane *const p = &x->plane[plane];
   const struct macroblockd_plane *const pd = &x->e_mbd.plane[plane];
   const BLOCK_SIZE plane_bsize = get_plane_block_size(bsize, pd);
   const int bw = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
   const int bh = 4 * num_4x4_blocks_high_lookup[plane_bsize];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (x->e_mbd.cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     aom_highbd_subtract_block(bh, bw, p->src_diff, bw, p->src.buf,
                               p->src.stride, pd->dst.buf, pd->dst.stride,
                               x->e_mbd.bd);
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   aom_subtract_block(bh, bw, p->src_diff, bw, p->src.buf, p->src.stride,
                      pd->dst.buf, pd->dst.stride);
 }
@@ -54,13 +54,13 @@ void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   (((1 << (VP9_PROB_COST_SHIFT - 1)) + (R) * (RM)) & \
    ((1 << VP9_PROB_COST_SHIFT) - 1))
 
-typedef struct vp10_token_state {
+typedef struct av1_token_state {
   int rate;
   int error;
   int next;
   int16_t token;
   short qc;
-} vp10_token_state;
+} av1_token_state;
 
 // TODO(jimbankoski): experiment to find optimal RD numbers.
 static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 };
@@ -80,7 +80,7 @@ static const int plane_rd_mult[PLANE_TYPES] = { 4, 2 };
 static int trellis_get_coeff_context(const int16_t *scan, const int16_t *nb,
                                      int idx, int token, uint8_t *token_cache) {
   int bak = token_cache[scan[idx]], pt;
-  token_cache[scan[idx]] = vp10_pt_energy_class[token];
+  token_cache[scan[idx]] = av1_pt_energy_class[token];
   pt = get_coef_context(nb, token_cache, idx + 1);
   token_cache[scan[idx]] = bak;
   return pt;
@@ -92,7 +92,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
   struct macroblock_plane *const p = &mb->plane[plane];
   struct macroblockd_plane *const pd = &xd->plane[plane];
   const int ref = is_inter_block(&xd->mi[0]->mbmi);
-  vp10_token_state tokens[1025][2];
+  av1_token_state tokens[1025][2];
   unsigned best_index[1025][2];
   uint8_t token_cache[1024];
   const tran_low_t *const coeff = BLOCK_OFFSET(mb->plane[plane].coeff, block);
@@ -120,10 +120,10 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
   int16_t t0, t1;
   EXTRABIT e0;
   int best, band, pt, i, final_eob;
-#if CONFIG_VPX_HIGHBITDEPTH
-  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+  const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
 #else
-  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+  const int *cat6_high_cost = av1_get_high_cost_table(8);
 #endif
 
   assert((!type && !plane) || (type && plane));
@@ -142,7 +142,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
 
   for (i = 0; i < eob; i++)
     token_cache[scan[i]] =
-        vp10_pt_energy_class[vp10_get_token(qcoeff[scan[i]])];
+        av1_pt_energy_class[av1_get_token(qcoeff[scan[i]])];
 
   for (i = eob; i-- > 0;) {
     int base_bits, d2, dx;
@@ -160,7 +160,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
       /* Evaluate the first possibility for this state. */
       rate0 = tokens[next][0].rate;
       rate1 = tokens[next][1].rate;
-      vp10_get_token_extra(x, &t0, &e0);
+      av1_get_token_extra(x, &t0, &e0);
       /* Consider both possible successor states. */
       if (next < default_eob) {
         band = band_translate[i + 1];
@@ -175,13 +175,13 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
       UPDATE_RD_COST();
       /* And pick the best. */
       best = rd_cost1 < rd_cost0;
-      base_bits = vp10_get_cost(t0, e0, cat6_high_cost);
+      base_bits = av1_get_cost(t0, e0, cat6_high_cost);
       dx = mul * (dqcoeff[rc] - coeff[rc]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         dx >>= xd->bd - 8;
       }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       d2 = dx * dx;
       tokens[i][0].rate = base_bits + (best ? rate1 : rate0);
       tokens[i][0].error = d2 + (best ? error1 : error0);
@@ -222,7 +222,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
         t1 = tokens[next][1].token == EOB_TOKEN ? EOB_TOKEN : ZERO_TOKEN;
         e0 = 0;
       } else {
-        vp10_get_token_extra(x, &t0, &e0);
+        av1_get_token_extra(x, &t0, &e0);
         t1 = t0;
       }
       if (next < default_eob) {
@@ -244,10 +244,10 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
       UPDATE_RD_COST();
       /* And pick the best. */
       best = rd_cost1 < rd_cost0;
-      base_bits = vp10_get_cost(t0, e0, cat6_high_cost);
+      base_bits = av1_get_cost(t0, e0, cat6_high_cost);
 
       if (shortcut) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
           dx -= ((dequant_ptr[rc != 0] >> (xd->bd - 8)) + sz) ^ sz;
         } else {
@@ -255,7 +255,7 @@ static int optimize_b(MACROBLOCK *mb, int plane, int block, TX_SIZE tx_size,
         }
 #else
         dx -= (dequant_ptr[rc != 0] + sz) ^ sz;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         d2 = dx * dx;
       }
 
@@ -341,7 +341,7 @@ static INLINE void fdct32x32(int rd_transform, const int16_t *src,
     aom_fdct32x32(src, dst, src_stride);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static INLINE void highbd_fdct32x32(int rd_transform, const int16_t *src,
                                     tran_low_t *dst, int src_stride) {
   if (rd_transform)
@@ -349,12 +349,12 @@ static INLINE void highbd_fdct32x32(int rd_transform, const int16_t *src,
   else
     aom_highbd_fdct32x32(src, dst, src_stride);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+void av1_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                        int diff_stride, TX_TYPE tx_type, int lossless) {
   if (lossless) {
-    vp10_fwht4x4(src_diff, coeff, diff_stride);
+    av1_fwht4x4(src_diff, coeff, diff_stride);
   } else {
     switch (tx_type) {
       case DCT_DCT:
@@ -363,7 +363,7 @@ void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
       case ADST_DCT:
       case DCT_ADST:
       case ADST_ADST:
-        vp10_fht4x4(src_diff, coeff, diff_stride, tx_type);
+        av1_fht4x4(src_diff, coeff, diff_stride, tx_type);
         break;
       default:
         assert(0);
@@ -379,7 +379,7 @@ static void fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_fht8x8(src_diff, coeff, diff_stride, tx_type);
+      av1_fht8x8(src_diff, coeff, diff_stride, tx_type);
       break;
     default:
       assert(0);
@@ -394,7 +394,7 @@ static void fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_fht16x16(src_diff, coeff, diff_stride, tx_type);
+      av1_fht16x16(src_diff, coeff, diff_stride, tx_type);
       break;
     default:
       assert(0);
@@ -420,12 +420,12 @@ static void fwd_txfm_32x32(int rd_transform, const int16_t *src_diff,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                               int diff_stride, TX_TYPE tx_type, int lossless) {
   if (lossless) {
     assert(tx_type == DCT_DCT);
-    vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+    av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
   } else {
     switch (tx_type) {
       case DCT_DCT:
@@ -434,7 +434,7 @@ void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
       case ADST_DCT:
       case DCT_ADST:
       case ADST_ADST:
-        vp10_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
+        av1_highbd_fht4x4(src_diff, coeff, diff_stride, tx_type);
         break;
       default:
         assert(0);
@@ -452,7 +452,7 @@ static void highbd_fwd_txfm_8x8(const int16_t *src_diff, tran_low_t *coeff,
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
+      av1_highbd_fht8x8(src_diff, coeff, diff_stride, tx_type);
       break;
     default:
       assert(0);
@@ -469,7 +469,7 @@ static void highbd_fwd_txfm_16x16(const int16_t *src_diff, tran_low_t *coeff,
     case ADST_DCT:
     case DCT_ADST:
     case ADST_ADST:
-      vp10_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
+      av1_highbd_fht16x16(src_diff, coeff, diff_stride, tx_type);
       break;
     default:
       assert(0);
@@ -494,9 +494,9 @@ static void highbd_fwd_txfm_32x32(int rd_transform, const int16_t *src_diff,
       break;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
@@ -518,12 +518,12 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
   const int16_t *src_diff;
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
       case TX_32X32:
         highbd_fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
-        vp10_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
+        av1_highbd_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin,
                                       p->round_fp, p->quant_fp, p->quant_shift,
                                       qcoeff, dqcoeff, pd->dequant, eob,
                                       scan_order->scan,
@@ -535,7 +535,7 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
         break;
       case TX_16X16:
         aom_highbd_fdct16x16(src_diff, coeff, diff_stride);
-        vp10_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
+        av1_highbd_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
                                 p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                                 pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -546,7 +546,7 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
         break;
       case TX_8X8:
         aom_highbd_fdct8x8(src_diff, coeff, diff_stride);
-        vp10_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
+        av1_highbd_quantize_fp(coeff, 64, x->skip_block, p->zbin, p->round_fp,
                                 p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                                 pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -557,11 +557,11 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
         break;
       case TX_4X4:
         if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
-          vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+          av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
         } else {
           aom_highbd_fdct4x4(src_diff, coeff, diff_stride);
         }
-        vp10_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
+        av1_highbd_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
                                 p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                                 pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -575,12 +575,12 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
     }
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   switch (tx_size) {
     case TX_32X32:
       fdct32x32(x->use_lp32x32fdct, src_diff, coeff, diff_stride);
-      vp10_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
+      av1_quantize_fp_32x32(coeff, 1024, x->skip_block, p->zbin, p->round_fp,
                              p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                              pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -591,7 +591,7 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
       break;
     case TX_16X16:
       aom_fdct16x16(src_diff, coeff, diff_stride);
-      vp10_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
+      av1_quantize_fp(coeff, 256, x->skip_block, p->zbin, p->round_fp,
                        p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                        pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -601,7 +601,7 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
 #endif
       break;
     case TX_8X8:
-      vp10_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
+      av1_fdct8x8_quant(src_diff, diff_stride, coeff, 64, x->skip_block,
                          p->zbin, p->round_fp, p->quant_fp, p->quant_shift,
                          qcoeff, dqcoeff, pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -612,11 +612,11 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
       break;
     case TX_4X4:
       if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
-        vp10_fwht4x4(src_diff, coeff, diff_stride);
+        av1_fwht4x4(src_diff, coeff, diff_stride);
       } else {
         aom_fdct4x4(src_diff, coeff, diff_stride);
       }
-      vp10_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
+      av1_quantize_fp(coeff, 16, x->skip_block, p->zbin, p->round_fp,
                        p->quant_fp, p->quant_shift, qcoeff, dqcoeff,
                        pd->dequant, eob, scan_order->scan,
 #if !CONFIG_AOM_QM
@@ -631,7 +631,7 @@ void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
   }
 }
 
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
@@ -650,7 +650,7 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
   const int16_t *src_diff;
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
       case TX_32X32:
@@ -686,7 +686,7 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
         break;
       case TX_4X4:
         if (xd->lossless[seg_id]) {
-          vp10_highbd_fwht4x4(src_diff, coeff, diff_stride);
+          av1_highbd_fwht4x4(src_diff, coeff, diff_stride);
         } else {
           aom_highbd_fdct4x4(src_diff, coeff, diff_stride);
         }
@@ -703,7 +703,7 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
     }
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   switch (tx_size) {
     case TX_32X32:
@@ -738,7 +738,7 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
       break;
     case TX_4X4:
       if (xd->lossless[seg_id]) {
-        vp10_fwht4x4(src_diff, coeff, diff_stride);
+        av1_fwht4x4(src_diff, coeff, diff_stride);
       } else {
         aom_fdct4x4(src_diff, coeff, diff_stride);
       }
@@ -756,7 +756,7 @@ void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
   }
 }
 
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
                       int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size) {
   MACROBLOCKD *const xd = &x->e_mbd;
   const struct macroblock_plane *const p = &x->plane[plane];
@@ -778,7 +778,7 @@ void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
   const int16_t *src_diff;
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
       case TX_32X32:
@@ -816,7 +816,7 @@ void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
 #endif
         break;
       case TX_4X4:
-        vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+        av1_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                                  xd->lossless[seg_id]);
         aom_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                               p->quant, p->quant_shift, qcoeff, dqcoeff,
@@ -832,7 +832,7 @@ void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
     }
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   switch (tx_size) {
     case TX_32X32:
@@ -869,7 +869,7 @@ void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
 #endif
       break;
     case TX_4X4:
-      vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+      av1_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                         xd->lossless[seg_id]);
       aom_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
                      p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
@@ -919,7 +919,7 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
         *a = *l = 0;
         return;
       } else {
-        vp10_xform_quant_fp(x, plane, block, blk_row, blk_col, plane_bsize,
+        av1_xform_quant_fp(x, plane, block, blk_row, blk_col, plane_bsize,
                             tx_size);
       }
     } else {
@@ -927,11 +927,11 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
         int txfm_blk_index = (plane << 2) + (block >> (tx_size << 1));
         if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_NONE) {
           // full forward transform and quantization
-          vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+          av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
                            tx_size);
         } else if (x->skip_txfm[txfm_blk_index] == SKIP_TXFM_AC_ONLY) {
           // fast path forward transform and quantization
-          vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+          av1_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
                               tx_size);
         } else {
           // skip forward transform
@@ -940,7 +940,7 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
           return;
         }
       } else {
-        vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
+        av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize,
                          tx_size);
       }
     }
@@ -956,26 +956,26 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
   if (p->eobs[block]) *(args->skip) = 0;
 
   if (p->eobs[block] == 0) return;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
       case TX_32X32:
-        vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride,
+        av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride,
                                        p->eobs[block], xd->bd, tx_type);
         break;
       case TX_16X16:
-        vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride,
+        av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride,
                                        p->eobs[block], xd->bd, tx_type);
         break;
       case TX_8X8:
-        vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride,
+        av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride,
                                      p->eobs[block], xd->bd, tx_type);
         break;
       case TX_4X4:
-        // this is like vp10_short_idct4x4 but has a special case around eob<=1
+        // this is like av1_short_idct4x4 but has a special case around eob<=1
         // which is significant (not just an optimization) for the lossless
         // case.
-        vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride,
+        av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride,
                                      p->eobs[block], xd->bd, tx_type,
                                      xd->lossless[xd->mi[0]->mbmi.segment_id]);
         break;
@@ -986,26 +986,26 @@ static void encode_block(int plane, int block, int blk_row, int blk_col,
 
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   switch (tx_size) {
     case TX_32X32:
-      vp10_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+      av1_inv_txfm_add_32x32(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                               tx_type);
       break;
     case TX_16X16:
-      vp10_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+      av1_inv_txfm_add_16x16(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                               tx_type);
       break;
     case TX_8X8:
-      vp10_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+      av1_inv_txfm_add_8x8(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                             tx_type);
       break;
     case TX_4X4:
-      // this is like vp10_short_idct4x4 but has a special case around eob<=1
+      // this is like av1_short_idct4x4 but has a special case around eob<=1
       // which is significant (not just an optimization) for the lossless
       // case.
-      vp10_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+      av1_inv_txfm_add_4x4(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                             tx_type, xd->lossless[xd->mi[0]->mbmi.segment_id]);
       break;
     default:
@@ -1025,36 +1025,36 @@ static void encode_block_pass1(int plane, int block, int blk_row, int blk_col,
   uint8_t *dst;
   dst = &pd->dst.buf[4 * blk_row * pd->dst.stride + 4 * blk_col];
 
-  vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
+  av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
 
   if (p->eobs[block] > 0) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       if (xd->lossless[0]) {
-        vp10_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+        av1_highbd_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                                 xd->bd);
       } else {
-        vp10_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
+        av1_highbd_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block],
                                 xd->bd);
       }
       return;
     }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     if (xd->lossless[0]) {
-      vp10_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+      av1_iwht4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
     } else {
-      vp10_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
+      av1_idct4x4_add(dqcoeff, dst, pd->dst.stride, p->eobs[block]);
     }
   }
 }
 
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
-  vp10_subtract_plane(x, bsize, 0);
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize) {
+  av1_subtract_plane(x, bsize, 0);
+  av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, 0,
                                           encode_block_pass1, x);
 }
 
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
   MACROBLOCKD *const xd = &x->e_mbd;
   struct optimize_ctx ctx;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
@@ -1066,21 +1066,21 @@ void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize) {
   if (x->skip) return;
 
   for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-    if (!x->skip_recode) vp10_subtract_plane(x, bsize, plane);
+    if (!x->skip_recode) av1_subtract_plane(x, bsize, plane);
 
     if (x->optimize && (!x->skip_recode || !x->skip_optimize)) {
       const struct macroblockd_plane *const pd = &xd->plane[plane];
       const TX_SIZE tx_size = plane ? get_uv_tx_size(mbmi, pd) : mbmi->tx_size;
-      vp10_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
+      av1_get_entropy_contexts(bsize, tx_size, pd, ctx.ta[plane],
                                 ctx.tl[plane]);
     }
 
-    vp10_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
+    av1_foreach_transformed_block_in_plane(xd, bsize, plane, encode_block,
                                             &arg);
   }
 }
 
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
                              BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                              void *arg) {
   struct encode_b_args *const args = arg;
@@ -1115,10 +1115,10 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
   src_diff = &p->src_diff[4 * (blk_row * diff_stride + blk_col)];
 
   mode = plane == 0 ? get_y_mode(xd->mi[0], block) : mbmi->uv_mode;
-  vp10_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
+  av1_predict_intra_block(xd, bwl, bhl, tx_size, mode, dst, dst_stride, dst,
                            dst_stride, blk_col, blk_row, plane);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     switch (tx_size) {
       case TX_32X32:
@@ -1138,7 +1138,7 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
 #endif
         }
         if (*eob)
-          vp10_highbd_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, xd->bd,
+          av1_highbd_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, xd->bd,
                                          tx_type);
         break;
       case TX_16X16:
@@ -1156,7 +1156,7 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
 #endif
         }
         if (*eob)
-          vp10_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd,
+          av1_highbd_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, xd->bd,
                                          tx_type);
         break;
       case TX_8X8:
@@ -1174,14 +1174,14 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
 #endif
         }
         if (*eob)
-          vp10_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd,
+          av1_highbd_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, xd->bd,
                                        tx_type);
         break;
       case TX_4X4:
         if (!x->skip_recode) {
           aom_highbd_subtract_block(4, 4, src_diff, diff_stride, src,
                                     src_stride, dst, dst_stride, xd->bd);
-          vp10_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+          av1_highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                                    xd->lossless[seg_id]);
           aom_highbd_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round,
                                 p->quant, p->quant_shift, qcoeff, dqcoeff,
@@ -1194,10 +1194,10 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
         }
 
         if (*eob)
-          // this is like vp10_short_idct4x4 but has a special case around
+          // this is like av1_short_idct4x4 but has a special case around
           // eob<=1 which is significant (not just an optimization) for the
           // lossless case.
-          vp10_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd,
+          av1_highbd_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, xd->bd,
                                        tx_type, xd->lossless[seg_id]);
         break;
       default:
@@ -1207,7 +1207,7 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
     if (*eob) *(args->skip) = 0;
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   switch (tx_size) {
     case TX_32X32:
@@ -1226,7 +1226,7 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
 #endif
       }
       if (*eob)
-        vp10_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type);
+        av1_inv_txfm_add_32x32(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_16X16:
       if (!x->skip_recode) {
@@ -1243,7 +1243,7 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
 #endif
       }
       if (*eob)
-        vp10_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
+        av1_inv_txfm_add_16x16(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_8X8:
       if (!x->skip_recode) {
@@ -1259,13 +1259,13 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
                        scan_order->iscan, qmatrix, iqmatrix);
 #endif
       }
-      if (*eob) vp10_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
+      if (*eob) av1_inv_txfm_add_8x8(dqcoeff, dst, dst_stride, *eob, tx_type);
       break;
     case TX_4X4:
       if (!x->skip_recode) {
         aom_subtract_block(4, 4, src_diff, diff_stride, src, src_stride, dst,
                            dst_stride);
-        vp10_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
+        av1_fwd_txfm_4x4(src_diff, coeff, diff_stride, tx_type,
                           xd->lossless[seg_id]);
         aom_quantize_b(coeff, 16, x->skip_block, p->zbin, p->round, p->quant,
                        p->quant_shift, qcoeff, dqcoeff, pd->dequant, eob,
@@ -1278,10 +1278,10 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
       }
 
       if (*eob) {
-        // this is like vp10_short_idct4x4 but has a special case around eob<=1
+        // this is like av1_short_idct4x4 but has a special case around eob<=1
         // which is significant (not just an optimization) for the lossless
         // case.
-        vp10_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type,
+        av1_inv_txfm_add_4x4(dqcoeff, dst, dst_stride, *eob, tx_type,
                               xd->lossless[seg_id]);
       }
       break;
@@ -1292,10 +1292,10 @@ void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
   if (*eob) *(args->skip) = 0;
 }
 
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   const MACROBLOCKD *const xd = &x->e_mbd;
   struct encode_b_args arg = { x, NULL, &xd->mi[0]->mbmi.skip };
 
-  vp10_foreach_transformed_block_in_plane(xd, bsize, plane,
-                                          vp10_encode_block_intra, &arg);
+  av1_foreach_transformed_block_in_plane(xd, bsize, plane,
+                                          av1_encode_block_intra, &arg);
 }
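/*
 * Illustrative, standalone sketch (not part of the patch): av1_subtract_plane()
 * above passes the whole plane to aom_subtract_block(), which fills the int16
 * residual buffer with source-minus-prediction samples that the forward
 * transforms (av1_fwd_txfm_4x4, fwd_txfm_8x8, ...) then consume. The loop
 * below is a simplified stand-in for that subtraction, shown only to make the
 * subtract -> transform -> quantize data flow explicit; it is not the library
 * routine itself, and its name is a placeholder.
 */
#include <stddef.h>
#include <stdint.h>

void subtract_block_sketch(int rows, int cols, int16_t *diff,
                           ptrdiff_t diff_stride, const uint8_t *src,
                           ptrdiff_t src_stride, const uint8_t *pred,
                           ptrdiff_t pred_stride) {
  int r, c;
  for (r = 0; r < rows; ++r) {
    for (c = 0; c < cols; ++c)
      diff[c] = (int16_t)(src[c] - pred[c]); /* residual sample */
    diff += diff_stride;
    src += src_stride;
    pred += pred_stride;
  }
}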
diff --git a/av1/encoder/encodemb.h b/av1/encoder/encodemb.h
index cd3b6772ed04bffe1ca3c6dd5fad7a7838505da9..f0ebaea45345290e8d56946814959ef7022058c8 100644
--- a/av1/encoder/encodemb.h
+++ b/av1/encoder/encodemb.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_ENCODEMB_H_
-#define VP10_ENCODER_ENCODEMB_H_
+#ifndef AV1_ENCODER_ENCODEMB_H_
+#define AV1_ENCODER_ENCODEMB_H_
 
 #include "./aom_config.h"
 #include "av1/encoder/block.h"
@@ -24,33 +24,33 @@ struct encode_b_args {
   struct optimize_ctx *ctx;
   int8_t *skip;
 };
-void vp10_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
-void vp10_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_encode_sb(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_encode_sby_pass1(MACROBLOCK *x, BLOCK_SIZE bsize);
+void av1_xform_quant_fp(MACROBLOCK *x, int plane, int block, int blk_row,
                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant_dc(MACROBLOCK *x, int plane, int block, int blk_row,
                          int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
-void vp10_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
+void av1_xform_quant(MACROBLOCK *x, int plane, int block, int blk_row,
                       int blk_col, BLOCK_SIZE plane_bsize, TX_SIZE tx_size);
 
-void vp10_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+void av1_subtract_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
-void vp10_encode_block_intra(int plane, int block, int blk_row, int blk_col,
+void av1_encode_block_intra(int plane, int block, int blk_row, int blk_col,
                              BLOCK_SIZE plane_bsize, TX_SIZE tx_size,
                              void *arg);
 
-void vp10_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+void av1_encode_intra_block_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
-void vp10_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+void av1_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                        int diff_stride, TX_TYPE tx_type, int lossless);
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_fwd_txfm_4x4(const int16_t *src_diff, tran_low_t *coeff,
                               int diff_stride, TX_TYPE tx_type, int lossless);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODEMB_H_
+#endif  // AV1_ENCODER_ENCODEMB_H_
diff --git a/av1/encoder/encodemv.c b/av1/encoder/encodemv.c
index 5cf3ad851a312e361b00566a440f66a72c0dd5dc..25c577e83200ec5904626dfa6ae17f5196f80aa9 100644
--- a/av1/encoder/encodemv.c
+++ b/av1/encoder/encodemv.c
@@ -20,16 +20,16 @@
 
 #include "aom_dsp/aom_dsp_common.h"
 
-static struct vp10_token mv_joint_encodings[MV_JOINTS];
-static struct vp10_token mv_class_encodings[MV_CLASSES];
-static struct vp10_token mv_fp_encodings[MV_FP_SIZE];
-static struct vp10_token mv_class0_encodings[CLASS0_SIZE];
-
-void vp10_entropy_mv_init(void) {
-  vp10_tokens_from_tree(mv_joint_encodings, vp10_mv_joint_tree);
-  vp10_tokens_from_tree(mv_class_encodings, vp10_mv_class_tree);
-  vp10_tokens_from_tree(mv_class0_encodings, vp10_mv_class0_tree);
-  vp10_tokens_from_tree(mv_fp_encodings, vp10_mv_fp_tree);
+static struct av1_token mv_joint_encodings[MV_JOINTS];
+static struct av1_token mv_class_encodings[MV_CLASSES];
+static struct av1_token mv_fp_encodings[MV_FP_SIZE];
+static struct av1_token mv_class0_encodings[CLASS0_SIZE];
+
+void av1_entropy_mv_init(void) {
+  av1_tokens_from_tree(mv_joint_encodings, av1_mv_joint_tree);
+  av1_tokens_from_tree(mv_class_encodings, av1_mv_class_tree);
+  av1_tokens_from_tree(mv_class0_encodings, av1_mv_class0_tree);
+  av1_tokens_from_tree(mv_fp_encodings, av1_mv_fp_tree);
 }
 
 static void encode_mv_component(aom_writer *w, int comp,
@@ -37,7 +37,7 @@ static void encode_mv_component(aom_writer *w, int comp,
   int offset;
   const int sign = comp < 0;
   const int mag = sign ? -comp : comp;
-  const int mv_class = vp10_get_mv_class(mag - 1, &offset);
+  const int mv_class = av1_get_mv_class(mag - 1, &offset);
   const int d = offset >> 3;         // int mv data
   const int fr = (offset >> 1) & 3;  // fractional mv data
   const int hp = offset & 1;         // high precision mv data
@@ -48,12 +48,12 @@ static void encode_mv_component(aom_writer *w, int comp,
   aom_write(w, sign, mvcomp->sign);
 
   // Class
-  vp10_write_token(w, vp10_mv_class_tree, mvcomp->classes,
+  av1_write_token(w, av1_mv_class_tree, mvcomp->classes,
                    &mv_class_encodings[mv_class]);
 
   // Integer bits
   if (mv_class == MV_CLASS_0) {
-    vp10_write_token(w, vp10_mv_class0_tree, mvcomp->class0,
+    av1_write_token(w, av1_mv_class0_tree, mvcomp->class0,
                      &mv_class0_encodings[d]);
   } else {
     int i;
@@ -62,7 +62,7 @@ static void encode_mv_component(aom_writer *w, int comp,
   }
 
   // Fractional bits
-  vp10_write_token(w, vp10_mv_fp_tree,
+  av1_write_token(w, av1_mv_fp_tree,
                    mv_class == MV_CLASS_0 ? mvcomp->class0_fp[d] : mvcomp->fp,
                    &mv_fp_encodings[fr]);
 
@@ -80,30 +80,30 @@ static void build_nmv_component_cost_table(int *mvcost,
   int class0_fp_cost[CLASS0_SIZE][MV_FP_SIZE], fp_cost[MV_FP_SIZE];
   int class0_hp_cost[2], hp_cost[2];
 
-  sign_cost[0] = vp10_cost_zero(mvcomp->sign);
-  sign_cost[1] = vp10_cost_one(mvcomp->sign);
-  vp10_cost_tokens(class_cost, mvcomp->classes, vp10_mv_class_tree);
-  vp10_cost_tokens(class0_cost, mvcomp->class0, vp10_mv_class0_tree);
+  sign_cost[0] = av1_cost_zero(mvcomp->sign);
+  sign_cost[1] = av1_cost_one(mvcomp->sign);
+  av1_cost_tokens(class_cost, mvcomp->classes, av1_mv_class_tree);
+  av1_cost_tokens(class0_cost, mvcomp->class0, av1_mv_class0_tree);
   for (i = 0; i < MV_OFFSET_BITS; ++i) {
-    bits_cost[i][0] = vp10_cost_zero(mvcomp->bits[i]);
-    bits_cost[i][1] = vp10_cost_one(mvcomp->bits[i]);
+    bits_cost[i][0] = av1_cost_zero(mvcomp->bits[i]);
+    bits_cost[i][1] = av1_cost_one(mvcomp->bits[i]);
   }
 
   for (i = 0; i < CLASS0_SIZE; ++i)
-    vp10_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], vp10_mv_fp_tree);
-  vp10_cost_tokens(fp_cost, mvcomp->fp, vp10_mv_fp_tree);
+    av1_cost_tokens(class0_fp_cost[i], mvcomp->class0_fp[i], av1_mv_fp_tree);
+  av1_cost_tokens(fp_cost, mvcomp->fp, av1_mv_fp_tree);
 
   if (usehp) {
-    class0_hp_cost[0] = vp10_cost_zero(mvcomp->class0_hp);
-    class0_hp_cost[1] = vp10_cost_one(mvcomp->class0_hp);
-    hp_cost[0] = vp10_cost_zero(mvcomp->hp);
-    hp_cost[1] = vp10_cost_one(mvcomp->hp);
+    class0_hp_cost[0] = av1_cost_zero(mvcomp->class0_hp);
+    class0_hp_cost[1] = av1_cost_one(mvcomp->class0_hp);
+    hp_cost[0] = av1_cost_zero(mvcomp->hp);
+    hp_cost[1] = av1_cost_one(mvcomp->hp);
   }
   mvcost[0] = 0;
   for (v = 1; v <= MV_MAX; ++v) {
     int z, c, o, d, e, f, cost = 0;
     z = v - 1;
-    c = vp10_get_mv_class(z, &o);
+    c = av1_get_mv_class(z, &o);
     cost += class_cost[c];
     d = (o >> 3);     /* int mv data */
     f = (o >> 1) & 3; /* fractional pel mv data */
@@ -136,11 +136,11 @@ static void update_mv(aom_writer *w, const unsigned int ct[2], aom_prob *cur_p,
                       aom_prob upd_p) {
 #if CONFIG_MISC_FIXES
   (void)upd_p;
-  vp10_cond_prob_diff_update(w, cur_p, ct);
+  av1_cond_prob_diff_update(w, cur_p, ct);
 #else
   const aom_prob new_p = get_binary_prob(ct[0], ct[1]) | 1;
-  const int update = cost_branch256(ct, *cur_p) + vp10_cost_zero(upd_p) >
-                     cost_branch256(ct, new_p) + vp10_cost_one(upd_p) + 7 * 256;
+  const int update = cost_branch256(ct, *cur_p) + av1_cost_zero(upd_p) >
+                     cost_branch256(ct, new_p) + av1_cost_one(upd_p) + 7 * 256;
   aom_write(w, update, upd_p);
   if (update) {
     *cur_p = new_p;
@@ -159,17 +159,17 @@ static void write_mv_update(const aom_tree_index *tree,
   // Assuming max number of probabilities <= 32
   assert(n <= 32);
 
-  vp10_tree_probs_from_distribution(tree, branch_ct, counts);
+  av1_tree_probs_from_distribution(tree, branch_ct, counts);
   for (i = 0; i < n - 1; ++i)
     update_mv(w, branch_ct[i], &probs[i], MV_UPDATE_PROB);
 }
 
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
+void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
                           nmv_context_counts *const counts) {
   int i, j;
   nmv_context *const mvc = &cm->fc->nmvc;
 
-  write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
+  write_mv_update(av1_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS,
                   w);
 
   for (i = 0; i < 2; ++i) {
@@ -177,9 +177,9 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
     nmv_component_counts *comp_counts = &counts->comps[i];
 
     update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
-    write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+    write_mv_update(av1_mv_class_tree, comp->classes, comp_counts->classes,
                     MV_CLASSES, w);
-    write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+    write_mv_update(av1_mv_class0_tree, comp->class0, comp_counts->class0,
                     CLASS0_SIZE, w);
     for (j = 0; j < MV_OFFSET_BITS; ++j)
       update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
@@ -187,10 +187,10 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
 
   for (i = 0; i < 2; ++i) {
     for (j = 0; j < CLASS0_SIZE; ++j)
-      write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+      write_mv_update(av1_mv_fp_tree, mvc->comps[i].class0_fp[j],
                       counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
 
-    write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+    write_mv_update(av1_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
                     MV_FP_SIZE, w);
   }
 
@@ -203,13 +203,13 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
   }
 }
 
-void vp10_encode_mv(VP10_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
                     const nmv_context *mvctx, int usehp) {
   const MV diff = { mv->row - ref->row, mv->col - ref->col };
-  const MV_JOINT_TYPE j = vp10_get_mv_joint(&diff);
-  usehp = usehp && vp10_use_mv_hp(ref);
+  const MV_JOINT_TYPE j = av1_get_mv_joint(&diff);
+  usehp = usehp && av1_use_mv_hp(ref);
 
-  vp10_write_token(w, vp10_mv_joint_tree, mvctx->joints,
+  av1_write_token(w, av1_mv_joint_tree, mvctx->joints,
                    &mv_joint_encodings[j]);
   if (mv_joint_vertical(j))
     encode_mv_component(w, diff.row, &mvctx->comps[0], usehp);
@@ -225,9 +225,9 @@ void vp10_encode_mv(VP10_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
   }
 }
 
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
                                const nmv_context *ctx, int usehp) {
-  vp10_cost_tokens(mvjoint, ctx->joints, vp10_mv_joint_tree);
+  av1_cost_tokens(mvjoint, ctx->joints, av1_mv_joint_tree);
   build_nmv_component_cost_table(mvcost[0], &ctx->comps[0], usehp);
   build_nmv_component_cost_table(mvcost[1], &ctx->comps[1], usehp);
 }
@@ -240,11 +240,11 @@ static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
     const MV diff = { mvs[i].as_mv.row - ref->row,
                       mvs[i].as_mv.col - ref->col };
-    vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
+    av1_inc_mv(&diff, counts, av1_use_mv_hp(ref));
   }
 }
 
-void vp10_update_mv_count(ThreadData *td) {
+void av1_update_mv_count(ThreadData *td) {
   const MACROBLOCKD *xd = &td->mb.e_mbd;
   const MODE_INFO *mi = xd->mi[0];
   const MB_MODE_INFO *const mbmi = &mi->mbmi;
diff --git a/av1/encoder/encodemv.h b/av1/encoder/encodemv.h
index 5b4bf5a115b97603ce5cb37dd15328966ab820f5..1c0d90fc24a0e3a6e43a4ff1a761fe9db8d402eb 100644
--- a/av1/encoder/encodemv.h
+++ b/av1/encoder/encodemv.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_ENCODEMV_H_
-#define VP10_ENCODER_ENCODEMV_H_
+#ifndef AV1_ENCODER_ENCODEMV_H_
+#define AV1_ENCODER_ENCODEMV_H_
 
 #include "av1/encoder/encoder.h"
 
@@ -18,21 +18,21 @@
 extern "C" {
 #endif
 
-void vp10_entropy_mv_init(void);
+void av1_entropy_mv_init(void);
 
-void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, aom_writer *w,
+void av1_write_nmv_probs(AV1_COMMON *cm, int usehp, aom_writer *w,
                           nmv_context_counts *const counts);
 
-void vp10_encode_mv(VP10_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
+void av1_encode_mv(AV1_COMP *cpi, aom_writer *w, const MV *mv, const MV *ref,
                     const nmv_context *mvctx, int usehp);
 
-void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
+void av1_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
                                const nmv_context *mvctx, int usehp);
 
-void vp10_update_mv_count(ThreadData *td);
+void av1_update_mv_count(ThreadData *td);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODEMV_H_
+#endif  // AV1_ENCODER_ENCODEMV_H_
diff --git a/av1/encoder/encoder.c b/av1/encoder/encoder.c
index 8afb342c6f46c8ff7a8f46c0c0852060005ff356..e84a99a046cd0394598f87261e50cd3b6fde401d 100644
--- a/av1/encoder/encoder.c
+++ b/av1/encoder/encoder.c
@@ -119,7 +119,7 @@ static INLINE void Scale2Ratio(VPX_SCALING mode, int *hr, int *hs) {
 
 // Mark all inactive blocks as active. Other segmentation features may be set
 // so memset cannot be used, instead only inactive blocks should be reset.
-static void suppress_active_map(VP10_COMP *cpi) {
+static void suppress_active_map(AV1_COMP *cpi) {
   unsigned char *const seg_map = cpi->segmentation_map;
   int i;
   if (cpi->active_map.enabled || cpi->active_map.update)
@@ -128,7 +128,7 @@ static void suppress_active_map(VP10_COMP *cpi) {
         seg_map[i] = AM_SEGMENT_ID_ACTIVE;
 }
 
-static void apply_active_map(VP10_COMP *cpi) {
+static void apply_active_map(AV1_COMP *cpi) {
   struct segmentation *const seg = &cpi->common.seg;
   unsigned char *const seg_map = cpi->segmentation_map;
   const unsigned char *const active_map = cpi->active_map.map;
@@ -145,16 +145,16 @@ static void apply_active_map(VP10_COMP *cpi) {
     if (cpi->active_map.enabled) {
       for (i = 0; i < cpi->common.mi_rows * cpi->common.mi_cols; ++i)
         if (seg_map[i] == AM_SEGMENT_ID_ACTIVE) seg_map[i] = active_map[i];
-      vp10_enable_segmentation(seg);
-      vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
-      vp10_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+      av1_enable_segmentation(seg);
+      av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+      av1_enable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
       // Setting the data to -MAX_LOOP_FILTER will result in the computed loop
       // filter level being zero regardless of the value of seg->abs_delta.
-      vp10_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
+      av1_set_segdata(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF,
                        -MAX_LOOP_FILTER);
     } else {
-      vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
-      vp10_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
+      av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_SKIP);
+      av1_disable_segfeature(seg, AM_SEGMENT_ID_INACTIVE, SEG_LVL_ALT_LF);
       if (seg->enabled) {
         seg->update_data = 1;
         seg->update_map = 1;
@@ -164,7 +164,7 @@ static void apply_active_map(VP10_COMP *cpi) {
   }
 }
 
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
                         int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols) {
     unsigned char *const active_map_8x8 = cpi->active_map.map;
@@ -191,7 +191,7 @@ int vp10_set_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
   }
 }
 
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *new_map_16x16, int rows,
                         int cols) {
   if (rows == cpi->common.mb_rows && cols == cpi->common.mb_cols &&
       new_map_16x16) {
@@ -216,7 +216,7 @@ int vp10_get_active_map(VP10_COMP *cpi, unsigned char *new_map_16x16, int rows,
   }
 }
 
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv) {
   MACROBLOCK *const mb = &cpi->td.mb;
   cpi->common.allow_high_precision_mv = allow_high_precision_mv;
   if (cpi->common.allow_high_precision_mv) {
@@ -228,15 +228,15 @@ void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv) {
   }
 }
 
-static void setup_frame(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void setup_frame(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   // Set up entropy context depending on frame type. The decoder mandates
   // the use of the default context, index 0, for keyframes and inter
   // frames where the error_resilient_mode or intra_only flag is set. For
   // other inter-frames the encoder currently uses only two contexts;
   // context 1 for ALTREF frames and context 0 for the others.
   if (frame_is_intra_only(cm) || cm->error_resilient_mode) {
-    vp10_setup_past_independence(cm);
+    av1_setup_past_independence(cm);
   } else {
     cm->frame_context_idx = cpi->refresh_alt_ref_frame;
   }
@@ -244,14 +244,14 @@ static void setup_frame(VP10_COMP *cpi) {
   if (cm->frame_type == KEY_FRAME) {
     cpi->refresh_golden_frame = 1;
     cpi->refresh_alt_ref_frame = 1;
-    vp10_zero(cpi->interp_filter_selected);
+    av1_zero(cpi->interp_filter_selected);
   } else {
     *cm->fc = cm->frame_contexts[cm->frame_context_idx];
-    vp10_zero(cpi->interp_filter_selected[0]);
+    av1_zero(cpi->interp_filter_selected[0]);
   }
 }
 
-static void vp10_enc_setup_mi(VP10_COMMON *cm) {
+static void av1_enc_setup_mi(AV1_COMMON *cm) {
   int i;
   cm->mi = cm->mip + cm->mi_stride + 1;
   memset(cm->mip, 0, cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mip));
@@ -269,7 +269,7 @@ static void vp10_enc_setup_mi(VP10_COMMON *cm) {
          cm->mi_stride * (cm->mi_rows + 1) * sizeof(*cm->mi_grid_base));
 }
 
-static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
+static int av1_enc_alloc_mi(AV1_COMMON *cm, int mi_size) {
   cm->mip = aom_calloc(mi_size, sizeof(*cm->mip));
   if (!cm->mip) return 1;
   cm->prev_mip = aom_calloc(mi_size, sizeof(*cm->prev_mip));
@@ -285,7 +285,7 @@ static int vp10_enc_alloc_mi(VP10_COMMON *cm, int mi_size) {
   return 0;
 }
 
-static void vp10_enc_free_mi(VP10_COMMON *cm) {
+static void av1_enc_free_mi(AV1_COMMON *cm) {
   aom_free(cm->mip);
   cm->mip = NULL;
   aom_free(cm->prev_mip);
@@ -296,7 +296,7 @@ static void vp10_enc_free_mi(VP10_COMMON *cm) {
   cm->prev_mi_grid_base = NULL;
 }
 
-static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) {
+static void av1_swap_mi_and_prev_mi(AV1_COMMON *cm) {
   // Current mip will be the prev_mip for the next frame.
   MODE_INFO **temp_base = cm->prev_mi_grid_base;
   MODE_INFO *temp = cm->prev_mip;
@@ -313,24 +313,24 @@ static void vp10_swap_mi_and_prev_mi(VP10_COMMON *cm) {
   cm->prev_mi_grid_visible = cm->prev_mi_grid_base + cm->mi_stride + 1;
 }
 
-void vp10_initialize_enc(void) {
+void av1_initialize_enc(void) {
   static volatile int init_done = 0;
 
   if (!init_done) {
     av1_rtcd();
     aom_dsp_rtcd();
     aom_scale_rtcd();
-    vp10_init_intra_predictors();
-    vp10_init_me_luts();
-    vp10_rc_init_minq_luts();
-    vp10_entropy_mv_init();
-    vp10_encode_token_init();
+    av1_init_intra_predictors();
+    av1_init_me_luts();
+    av1_rc_init_minq_luts();
+    av1_entropy_mv_init();
+    av1_encode_token_init();
     init_done = 1;
   }
 }
 
-static void dealloc_compressor_data(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void dealloc_compressor_data(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
   aom_free(cpi->mbmi_ext_base);
   cpi->mbmi_ext_base = NULL;
@@ -364,25 +364,25 @@ static void dealloc_compressor_data(VP10_COMP *cpi) {
   cpi->nmvsadcosts_hp[0] = NULL;
   cpi->nmvsadcosts_hp[1] = NULL;
 
-  vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  av1_cyclic_refresh_free(cpi->cyclic_refresh);
   cpi->cyclic_refresh = NULL;
 
   aom_free(cpi->active_map.map);
   cpi->active_map.map = NULL;
 
-  vp10_free_ref_frame_buffers(cm->buffer_pool);
-  vp10_free_context_buffers(cm);
+  av1_free_ref_frame_buffers(cm->buffer_pool);
+  av1_free_context_buffers(cm);
 
   aom_free_frame_buffer(&cpi->last_frame_uf);
   aom_free_frame_buffer(&cpi->scaled_source);
   aom_free_frame_buffer(&cpi->scaled_last_source);
   aom_free_frame_buffer(&cpi->alt_ref_buffer);
-  vp10_lookahead_destroy(cpi->lookahead);
+  av1_lookahead_destroy(cpi->lookahead);
 
   aom_free(cpi->tile_tok[0][0]);
   cpi->tile_tok[0][0] = 0;
 
-  vp10_free_pc_tree(&cpi->td);
+  av1_free_pc_tree(&cpi->td);
 
   if (cpi->source_diff_var != NULL) {
     aom_free(cpi->source_diff_var);
@@ -390,15 +390,15 @@ static void dealloc_compressor_data(VP10_COMP *cpi) {
   }
 }
 
-static void save_coding_context(VP10_COMP *cpi) {
+static void save_coding_context(AV1_COMP *cpi) {
   CODING_CONTEXT *const cc = &cpi->coding_context;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
 
   // Stores a snapshot of key state variables which can subsequently be
-  // restored with a call to vp10_restore_coding_context. These functions are
-  // intended for use in a re-code loop in vp10_compress_frame where the
+  // restored with a call to restore_coding_context. These functions are
+  // intended for use in the encoder's re-code loop, where the
   // quantizer value is adjusted between loop iterations.
-  vp10_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
+  av1_copy(cc->nmvjointcost, cpi->td.mb.nmvjointcost);
 
   memcpy(cc->nmvcosts[0], cpi->nmvcosts[0],
          MV_VALS * sizeof(*cpi->nmvcosts[0]));
@@ -410,25 +410,25 @@ static void save_coding_context(VP10_COMP *cpi) {
          MV_VALS * sizeof(*cpi->nmvcosts_hp[1]));
 
 #if !CONFIG_MISC_FIXES
-  vp10_copy(cc->segment_pred_probs, cm->segp.pred_probs);
+  av1_copy(cc->segment_pred_probs, cm->segp.pred_probs);
 #endif
 
   memcpy(cpi->coding_context.last_frame_seg_map_copy, cm->last_frame_seg_map,
          (cm->mi_rows * cm->mi_cols));
 
-  vp10_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
-  vp10_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
+  av1_copy(cc->last_ref_lf_deltas, cm->lf.last_ref_deltas);
+  av1_copy(cc->last_mode_lf_deltas, cm->lf.last_mode_deltas);
 
   cc->fc = *cm->fc;
 }
 
-static void restore_coding_context(VP10_COMP *cpi) {
+static void restore_coding_context(AV1_COMP *cpi) {
   CODING_CONTEXT *const cc = &cpi->coding_context;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
 
   // Restore key state variables to the snapshot state stored in the
-  // previous call to vp10_save_coding_context.
-  vp10_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
+  // previous call to save_coding_context.
+  av1_copy(cpi->td.mb.nmvjointcost, cc->nmvjointcost);
 
   memcpy(cpi->nmvcosts[0], cc->nmvcosts[0], MV_VALS * sizeof(*cc->nmvcosts[0]));
   memcpy(cpi->nmvcosts[1], cc->nmvcosts[1], MV_VALS * sizeof(*cc->nmvcosts[1]));
@@ -438,20 +438,20 @@ static void restore_coding_context(VP10_COMP *cpi) {
          MV_VALS * sizeof(*cc->nmvcosts_hp[1]));
 
 #if !CONFIG_MISC_FIXES
-  vp10_copy(cm->segp.pred_probs, cc->segment_pred_probs);
+  av1_copy(cm->segp.pred_probs, cc->segment_pred_probs);
 #endif
 
   memcpy(cm->last_frame_seg_map, cpi->coding_context.last_frame_seg_map_copy,
          (cm->mi_rows * cm->mi_cols));
 
-  vp10_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
-  vp10_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
+  av1_copy(cm->lf.last_ref_deltas, cc->last_ref_lf_deltas);
+  av1_copy(cm->lf.last_mode_deltas, cc->last_mode_lf_deltas);
 
   *cm->fc = cc->fc;
 }
 
-static void configure_static_seg_features(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void configure_static_seg_features(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   struct segmentation *const seg = &cm->seg;
 
@@ -467,10 +467,10 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
     cpi->static_mb_pct = 0;
 
     // Disable segmentation
-    vp10_disable_segmentation(seg);
+    av1_disable_segmentation(seg);
 
     // Clear down the segment features.
-    vp10_clearall_segfeatures(seg);
+    av1_clearall_segfeatures(seg);
   } else if (cpi->refresh_alt_ref_frame) {
     // If this is an alt ref frame
     // Clear down the global segmentation map
@@ -480,12 +480,12 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
     cpi->static_mb_pct = 0;
 
     // Disable segmentation and individual segment features by default
-    vp10_disable_segmentation(seg);
-    vp10_clearall_segfeatures(seg);
+    av1_disable_segmentation(seg);
+    av1_clearall_segfeatures(seg);
 
     // Scan frames from current to arf frame.
     // This function re-enables segmentation if appropriate.
-    vp10_update_mbgraph_stats(cpi);
+    av1_update_mbgraph_stats(cpi);
 
     // If segmentation was enabled set those features needed for the
     // arf itself.
@@ -494,12 +494,12 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
       seg->update_data = 1;
 
       qi_delta =
-          vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
-      vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
-      vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+          av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 0.875, cm->bit_depth);
+      av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta - 2);
+      av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
 
-      vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
-      vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+      av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+      av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
 
       // Where relevant assume segment data is delta data
       seg->abs_delta = SEGMENT_DELTADATA;
@@ -515,32 +515,32 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
         seg->update_data = 1;
         seg->abs_delta = SEGMENT_DELTADATA;
 
-        qi_delta = vp10_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
+        qi_delta = av1_compute_qdelta(rc, rc->avg_q, rc->avg_q * 1.125,
                                        cm->bit_depth);
-        vp10_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
-        vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
+        av1_set_segdata(seg, 1, SEG_LVL_ALT_Q, qi_delta + 2);
+        av1_enable_segfeature(seg, 1, SEG_LVL_ALT_Q);
 
-        vp10_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
-        vp10_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
+        av1_set_segdata(seg, 1, SEG_LVL_ALT_LF, -2);
+        av1_enable_segfeature(seg, 1, SEG_LVL_ALT_LF);
 
         // Segment coding disabled for compred testing
         if (high_q || (cpi->static_mb_pct == 100)) {
-          vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
-          vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
-          vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+          av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+          av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+          av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
         }
       } else {
         // Disable segmentation and clear down features if alt ref
         // is not active for this group
 
-        vp10_disable_segmentation(seg);
+        av1_disable_segmentation(seg);
 
         memset(cpi->segmentation_map, 0, cm->mi_rows * cm->mi_cols);
 
         seg->update_map = 0;
         seg->update_data = 0;
 
-        vp10_clearall_segfeatures(seg);
+        av1_clearall_segfeatures(seg);
       }
     } else if (rc->is_src_frame_alt_ref) {
       // Special case where we are coding over the top of a previous
@@ -548,19 +548,19 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
       // Segment coding disabled for compred testing
 
       // Enable ref frame features for segment 0 as well
-      vp10_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
-      vp10_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
+      av1_enable_segfeature(seg, 0, SEG_LVL_REF_FRAME);
+      av1_enable_segfeature(seg, 1, SEG_LVL_REF_FRAME);
 
       // All mbs should use ALTREF_FRAME
-      vp10_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
-      vp10_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
-      vp10_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
-      vp10_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+      av1_clear_segdata(seg, 0, SEG_LVL_REF_FRAME);
+      av1_set_segdata(seg, 0, SEG_LVL_REF_FRAME, ALTREF_FRAME);
+      av1_clear_segdata(seg, 1, SEG_LVL_REF_FRAME);
+      av1_set_segdata(seg, 1, SEG_LVL_REF_FRAME, ALTREF_FRAME);
 
       // Skip all MBs if high Q (0,0 mv and skip coeffs)
       if (high_q) {
-        vp10_enable_segfeature(seg, 0, SEG_LVL_SKIP);
-        vp10_enable_segfeature(seg, 1, SEG_LVL_SKIP);
+        av1_enable_segfeature(seg, 0, SEG_LVL_SKIP);
+        av1_enable_segfeature(seg, 1, SEG_LVL_SKIP);
       }
       // Enable data update
       seg->update_data = 1;
@@ -574,8 +574,8 @@ static void configure_static_seg_features(VP10_COMP *cpi) {
   }
 }
 
-static void update_reference_segmentation_map(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void update_reference_segmentation_map(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MODE_INFO **mi_8x8_ptr = cm->mi_grid_visible;
   uint8_t *cache_ptr = cm->last_frame_seg_map;
   int row, col;
@@ -590,14 +590,14 @@ static void update_reference_segmentation_map(VP10_COMP *cpi) {
   }
 }
 
-static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static void alloc_raw_frame_buffers(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
 
   if (!cpi->lookahead)
-    cpi->lookahead = vp10_lookahead_init(oxcf->width, oxcf->height,
+    cpi->lookahead = av1_lookahead_init(oxcf->width, oxcf->height,
                                          cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                          cm->use_highbitdepth,
 #endif
                                          oxcf->lag_in_frames);
@@ -608,7 +608,7 @@ static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
   // TODO(agrange) Check if ARF is enabled and skip allocation if not.
   if (aom_realloc_frame_buffer(&cpi->alt_ref_buffer, oxcf->width, oxcf->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
                                VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -617,11 +617,11 @@ static void alloc_raw_frame_buffers(VP10_COMP *cpi) {
                        "Failed to allocate altref buffer");
 }
 
-static void alloc_util_frame_buffers(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void alloc_util_frame_buffers(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   if (aom_realloc_frame_buffer(&cpi->last_frame_uf, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
                                VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -631,7 +631,7 @@ static void alloc_util_frame_buffers(VP10_COMP *cpi) {
 
   if (aom_realloc_frame_buffer(&cpi->scaled_source, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
                                VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -641,7 +641,7 @@ static void alloc_util_frame_buffers(VP10_COMP *cpi) {
 
   if (aom_realloc_frame_buffer(&cpi->scaled_last_source, cm->width, cm->height,
                                cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                cm->use_highbitdepth,
 #endif
                                VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment,
@@ -650,8 +650,8 @@ static void alloc_util_frame_buffers(VP10_COMP *cpi) {
                        "Failed to allocate scaled last source buffer");
 }
 
-static int alloc_context_buffers_ext(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+static int alloc_context_buffers_ext(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   int mi_size = cm->mi_cols * cm->mi_rows;
 
   cpi->mbmi_ext_base = aom_calloc(mi_size, sizeof(*cpi->mbmi_ext_base));
@@ -660,10 +660,10 @@ static int alloc_context_buffers_ext(VP10_COMP *cpi) {
   return 0;
 }
 
-void vp10_alloc_compressor_data(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+void av1_alloc_compressor_data(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
 
-  vp10_alloc_context_buffers(cm, cm->width, cm->height);
+  av1_alloc_context_buffers(cm, cm->width, cm->height);
 
   alloc_context_buffers_ext(cpi);
 
@@ -675,53 +675,53 @@ void vp10_alloc_compressor_data(VP10_COMP *cpi) {
                     aom_calloc(tokens, sizeof(*cpi->tile_tok[0][0])));
   }
 
-  vp10_setup_pc_tree(&cpi->common, &cpi->td);
+  av1_setup_pc_tree(&cpi->common, &cpi->td);
 }
 
-void vp10_new_framerate(VP10_COMP *cpi, double framerate) {
+void av1_new_framerate(AV1_COMP *cpi, double framerate) {
   cpi->framerate = framerate < 0.1 ? 30 : framerate;
-  vp10_rc_update_framerate(cpi);
+  av1_rc_update_framerate(cpi);
 }
 
-static void set_tile_limits(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_tile_limits(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
   int min_log2_tile_cols, max_log2_tile_cols;
-  vp10_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
+  av1_get_tile_n_bits(cm->mi_cols, &min_log2_tile_cols, &max_log2_tile_cols);
 
   cm->log2_tile_cols =
       clamp(cpi->oxcf.tile_columns, min_log2_tile_cols, max_log2_tile_cols);
   cm->log2_tile_rows = cpi->oxcf.tile_rows;
 }
 
-static void update_frame_size(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void update_frame_size(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
-  vp10_set_mb_mi(cm, cm->width, cm->height);
-  vp10_init_context_buffers(cm);
-  vp10_init_macroblockd(cm, xd, NULL);
+  av1_set_mb_mi(cm, cm->width, cm->height);
+  av1_init_context_buffers(cm);
+  av1_init_macroblockd(cm, xd, NULL);
   memset(cpi->mbmi_ext_base, 0,
          cm->mi_rows * cm->mi_cols * sizeof(*cpi->mbmi_ext_base));
 
   set_tile_limits(cpi);
 }
 
-static void init_buffer_indices(VP10_COMP *cpi) {
+static void init_buffer_indices(AV1_COMP *cpi) {
   cpi->lst_fb_idx = 0;
   cpi->gld_fb_idx = 1;
   cpi->alt_fb_idx = 2;
 }
 
-static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
-  VP10_COMMON *const cm = &cpi->common;
+static void init_config(struct AV1_COMP *cpi, AV1EncoderConfig *oxcf) {
+  AV1_COMMON *const cm = &cpi->common;
 
   cpi->oxcf = *oxcf;
   cpi->framerate = oxcf->init_framerate;
 
   cm->profile = oxcf->profile;
   cm->bit_depth = oxcf->bit_depth;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   cm->use_highbitdepth = oxcf->use_highbitdepth;
 #endif
   cm->color_space = oxcf->color_space;
@@ -729,13 +729,13 @@ static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
 
   cm->width = oxcf->width;
   cm->height = oxcf->height;
-  vp10_alloc_compressor_data(cpi);
+  av1_alloc_compressor_data(cpi);
 
   // Single thread case: use counts in common.
   cpi->td.counts = &cm->counts;
 
   // change includes all joint functionality
-  vp10_change_config(cpi, oxcf);
+  av1_change_config(cpi, oxcf);
 
   cpi->static_mb_pct = 0;
   cpi->ref_frame_flags = 0;
@@ -744,7 +744,7 @@ static void init_config(struct VP10_COMP *cpi, VP10EncoderConfig *oxcf) {
 }
 
 static void set_rc_buffer_sizes(RATE_CONTROL *rc,
-                                const VP10EncoderConfig *oxcf) {
+                                const AV1EncoderConfig *oxcf) {
   const int64_t bandwidth = oxcf->target_bandwidth;
   const int64_t starting = oxcf->starting_buffer_level_ms;
   const int64_t optimal = oxcf->optimal_buffer_level_ms;
@@ -757,7 +757,7 @@ static void set_rc_buffer_sizes(RATE_CONTROL *rc,
       (maximum == 0) ? bandwidth / 8 : maximum * bandwidth / 1000;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define HIGHBD_BFP(BT, SDF, SDAF, VF, SVF, SVAF, SDX3F, SDX8F, SDX4DF) \
   cpi->fn_ptr[BT].sdf = SDF;                                           \
   cpi->fn_ptr[BT].sdaf = SDAF;                                         \
@@ -924,8 +924,8 @@ MAKE_BFP_SAD8_WRAPPER(aom_highbd_sad4x4x8)
 MAKE_BFP_SAD4D_WRAPPER(aom_highbd_sad4x4x4d)
 /* clang-format on */
 
-static void highbd_set_var_fns(VP10_COMP *const cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void highbd_set_var_fns(AV1_COMP *const cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   if (cm->use_highbitdepth) {
     switch (cm->bit_depth) {
       case VPX_BITS_8:
@@ -1188,10 +1188,10 @@ static void highbd_set_var_fns(VP10_COMP *const cpi) {
     }
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void realloc_segmentation_maps(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void realloc_segmentation_maps(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
 
   // Create the encoder segmentation map and set all entries to 0
   aom_free(cpi->segmentation_map);
@@ -1199,9 +1199,9 @@ static void realloc_segmentation_maps(VP10_COMP *cpi) {
                   aom_calloc(cm->mi_rows * cm->mi_cols, 1));
 
   // Create a map used for cyclic background refresh.
-  if (cpi->cyclic_refresh) vp10_cyclic_refresh_free(cpi->cyclic_refresh);
+  if (cpi->cyclic_refresh) av1_cyclic_refresh_free(cpi->cyclic_refresh);
   CHECK_MEM_ERROR(cm, cpi->cyclic_refresh,
-                  vp10_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
+                  av1_cyclic_refresh_alloc(cm->mi_rows, cm->mi_cols));
 
   // Create a map used to mark inactive areas.
   aom_free(cpi->active_map.map);
@@ -1215,8 +1215,8 @@ static void realloc_segmentation_maps(VP10_COMP *cpi) {
                   aom_calloc(cm->mi_rows * cm->mi_cols, 1));
 }
 
-void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_change_config(struct AV1_COMP *cpi, const AV1EncoderConfig *oxcf) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
   if (cm->profile != oxcf->profile) cm->profile = oxcf->profile;
@@ -1230,9 +1230,9 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
     assert(cm->bit_depth > VPX_BITS_8);
 
   cpi->oxcf = *oxcf;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   cpi->td.mb.e_mbd.bd = (int)cm->bit_depth;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
     rc->baseline_gf_interval = FIXED_GF_INTERVAL;
@@ -1249,8 +1249,8 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
                                         : REFRESH_FRAME_CONTEXT_BACKWARD;
   cm->reset_frame_context = RESET_FRAME_CONTEXT_NONE;
 
-  vp10_reset_segment_features(cm);
-  vp10_set_high_precision_mv(cpi, 0);
+  av1_reset_segment_features(cm);
+  av1_set_high_precision_mv(cpi, 0);
 
   {
     int i;
@@ -1268,7 +1268,7 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
   rc->buffer_level = VPXMIN(rc->buffer_level, rc->maximum_buffer_size);
 
   // Set up frame rate and related parameters rate control values.
-  vp10_new_framerate(cpi, cpi->framerate);
+  av1_new_framerate(cpi, cpi->framerate);
 
   // Set absolute upper and lower quality limits
   rc->worst_quality = cpi->oxcf.worst_allowed_q;
@@ -1288,8 +1288,8 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
 
   if (cpi->initial_width) {
     if (cm->width > cpi->initial_width || cm->height > cpi->initial_height) {
-      vp10_free_context_buffers(cm);
-      vp10_alloc_compressor_data(cpi);
+      av1_free_context_buffers(cm);
+      av1_alloc_compressor_data(cpi);
       realloc_segmentation_maps(cpi);
       cpi->initial_width = cpi->initial_height = 0;
     }
@@ -1310,7 +1310,7 @@ void vp10_change_config(struct VP10_COMP *cpi, const VP10EncoderConfig *oxcf) {
   cpi->ext_refresh_frame_flags_pending = 0;
   cpi->ext_refresh_frame_context_pending = 0;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_set_var_fns(cpi);
 #endif
 }
@@ -1357,26 +1357,26 @@ static void cal_nmvsadcosts_hp(int *mvsadcost[2]) {
   } while (++i <= MV_MAX);
 }
 
-VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
+AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
                                   BufferPool *const pool) {
   unsigned int i;
-  VP10_COMP *volatile const cpi = aom_memalign(32, sizeof(VP10_COMP));
-  VP10_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
+  AV1_COMP *volatile const cpi = aom_memalign(32, sizeof(AV1_COMP));
+  AV1_COMMON *volatile const cm = cpi != NULL ? &cpi->common : NULL;
 
   if (!cm) return NULL;
 
-  vp10_zero(*cpi);
+  av1_zero(*cpi);
 
   if (setjmp(cm->error.jmp)) {
     cm->error.setjmp = 0;
-    vp10_remove_compressor(cpi);
+    av1_remove_compressor(cpi);
     return 0;
   }
 
   cm->error.setjmp = 1;
-  cm->alloc_mi = vp10_enc_alloc_mi;
-  cm->free_mi = vp10_enc_free_mi;
-  cm->setup_mi = vp10_enc_setup_mi;
+  cm->alloc_mi = av1_enc_alloc_mi;
+  cm->free_mi = av1_enc_free_mi;
+  cm->setup_mi = av1_enc_setup_mi;
 
   CHECK_MEM_ERROR(cm, cm->fc, (FRAME_CONTEXT *)aom_calloc(1, sizeof(*cm->fc)));
   CHECK_MEM_ERROR(
@@ -1389,7 +1389,7 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   cpi->common.buffer_pool = pool;
 
   init_config(cpi, oxcf);
-  vp10_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
+  av1_rc_init(&cpi->oxcf, oxcf->pass, &cpi->rc);
 
   cm->current_video_frame = 0;
   cpi->partition_search_skippable_frame = 0;
@@ -1511,7 +1511,7 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
 
   if (oxcf->pass == 1) {
-    vp10_init_first_pass(cpi);
+    av1_init_first_pass(cpi);
   } else if (oxcf->pass == 2) {
     const size_t packet_sz = sizeof(FIRSTPASS_STATS);
     const int packets = (int)(oxcf->two_pass_stats_in.sz / packet_sz);
@@ -1533,11 +1533,11 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
     cpi->twopass.stats_in = cpi->twopass.stats_in_start;
     cpi->twopass.stats_in_end = &cpi->twopass.stats_in[packets - 1];
 
-    vp10_init_second_pass(cpi);
+    av1_init_second_pass(cpi);
   }
 
-  vp10_set_speed_features_framesize_independent(cpi);
-  vp10_set_speed_features_framesize_dependent(cpi);
+  av1_set_speed_features_framesize_independent(cpi);
+  av1_set_speed_features_framesize_dependent(cpi);
 
   // Allocate memory to store variances for a frame.
   CHECK_MEM_ERROR(cm, cpi->source_diff_var, aom_calloc(cm->MBs, sizeof(diff)));
@@ -1606,21 +1606,21 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
       aom_sub_pixel_variance4x4, aom_sub_pixel_avg_variance4x4, aom_sad4x4x3,
       aom_sad4x4x8, aom_sad4x4x4d)
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   highbd_set_var_fns(cpi);
 #endif
 
-  /* vp10_init_quantizer() is first called here. Add check in
-   * vp10_frame_init_quantizer() so that vp10_init_quantizer is only
+  /* av1_init_quantizer() is first called here. Add check in
+   * av1_frame_init_quantizer() so that av1_init_quantizer is only
    * called later when needed. This will avoid unnecessary calls of
-   * vp10_init_quantizer() for every frame.
+   * av1_init_quantizer() for every frame.
    */
-  vp10_init_quantizer(cpi);
+  av1_init_quantizer(cpi);
 #if CONFIG_AOM_QM
   aom_qm_init(cm);
 #endif
 
-  vp10_loop_filter_init(cm);
+  av1_loop_filter_init(cm);
 
   cm->error.setjmp = 0;
 
@@ -1631,8 +1631,8 @@ VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
 #define SNPRINT2(H, T, V) \
   snprintf((H) + strlen(H), sizeof(H) - strlen(H), (T), (V))
 
-void vp10_remove_compressor(VP10_COMP *cpi) {
-  VP10_COMMON *cm;
+void av1_remove_compressor(AV1_COMP *cpi) {
+  AV1_COMMON *cm;
   unsigned int i;
   int t;
 
@@ -1733,14 +1733,14 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
     // Deallocate allocated thread data.
     if (t < cpi->num_workers - 1) {
       aom_free(thread_data->td->counts);
-      vp10_free_pc_tree(thread_data->td);
+      av1_free_pc_tree(thread_data->td);
       aom_free(thread_data->td);
     }
   }
   aom_free(cpi->tile_thr_data);
   aom_free(cpi->workers);
 
-  if (cpi->num_workers > 1) vp10_loop_filter_dealloc(&cpi->lf_row_sync);
+  if (cpi->num_workers > 1) av1_loop_filter_dealloc(&cpi->lf_row_sync);
 
   dealloc_compressor_data(cpi);
 
@@ -1756,8 +1756,8 @@ void vp10_remove_compressor(VP10_COMP *cpi) {
   }
 #endif
 
-  vp10_remove_common(cm);
-  vp10_free_ref_frame_buffers(cm->buffer_pool);
+  av1_remove_common(cm);
+  av1_free_ref_frame_buffers(cm->buffer_pool);
   aom_free(cpi);
 
 #ifdef OUTPUT_YUV_SKINMAP
@@ -1804,7 +1804,7 @@ static void encoder_variance(const uint8_t *a, int a_stride, const uint8_t *b,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void encoder_highbd_variance64(const uint8_t *a8, int a_stride,
                                       const uint8_t *b8, int b_stride, int w,
                                       int h, uint64_t *sse, uint64_t *sum) {
@@ -1836,7 +1836,7 @@ static void encoder_highbd_8_variance(const uint8_t *a8, int a_stride,
   *sse = (unsigned int)sse_long;
   *sum = (int)sum_long;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
                        int b_stride, int width, int height) {
@@ -1878,7 +1878,7 @@ static int64_t get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
   return total_sse;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int64_t highbd_get_sse_shift(const uint8_t *a8, int a_stride,
                                     const uint8_t *b8, int b_stride, int width,
                                     int height, unsigned int input_shift) {
@@ -1931,7 +1931,7 @@ static int64_t highbd_get_sse(const uint8_t *a, int a_stride, const uint8_t *b,
   }
   return total_sse;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 typedef struct {
   double psnr[4];       // total/y/u/v
@@ -1939,7 +1939,7 @@ typedef struct {
   uint32_t samples[4];  // total/y/u/v
 } PSNR_STATS;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
                              const YV12_BUFFER_CONFIG *b, PSNR_STATS *psnr,
                              unsigned int bit_depth,
@@ -1987,7 +1987,7 @@ static void calc_highbd_psnr(const YV12_BUFFER_CONFIG *a,
       aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
 }
 
-#else   // !CONFIG_VPX_HIGHBITDEPTH
+#else   // !CONFIG_AOM_HIGHBITDEPTH
 
 static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
                       PSNR_STATS *psnr) {
@@ -2022,13 +2022,13 @@ static void calc_psnr(const YV12_BUFFER_CONFIG *a, const YV12_BUFFER_CONFIG *b,
   psnr->psnr[0] =
       aom_sse_to_psnr((double)total_samples, peak, (double)total_sse);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static void generate_psnr_packet(VP10_COMP *cpi) {
+static void generate_psnr_packet(AV1_COMP *cpi) {
   struct aom_codec_cx_pkt pkt;
   int i;
   PSNR_STATS psnr;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   calc_highbd_psnr(cpi->Source, cpi->common.frame_to_show, &psnr,
                    cpi->td.mb.e_mbd.bd, cpi->oxcf.input_bit_depth);
 #else
@@ -2044,22 +2044,22 @@ static void generate_psnr_packet(VP10_COMP *cpi) {
   aom_codec_pkt_list_add(cpi->output_pkt_list, &pkt);
 }
 
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags) {
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags) {
   if (ref_frame_flags > 7) return -1;
 
   cpi->ref_frame_flags = ref_frame_flags;
   return 0;
 }
 
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags) {
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags) {
   cpi->ext_refresh_golden_frame = (ref_frame_flags & VPX_GOLD_FLAG) != 0;
   cpi->ext_refresh_alt_ref_frame = (ref_frame_flags & VPX_ALT_FLAG) != 0;
   cpi->ext_refresh_last_frame = (ref_frame_flags & VPX_LAST_FLAG) != 0;
   cpi->ext_refresh_frame_flags_pending = 1;
 }
 
-static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
-    VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
+static YV12_BUFFER_CONFIG *get_av1_ref_frame_buffer(
+    AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag) {
   MV_REFERENCE_FRAME ref_frame = NONE;
   if (ref_frame_flag == VPX_LAST_FLAG)
     ref_frame = LAST_FRAME;
@@ -2071,9 +2071,9 @@ static YV12_BUFFER_CONFIG *get_vp10_ref_frame_buffer(
   return ref_frame == NONE ? NULL : get_ref_frame_buffer(cpi, ref_frame);
 }
 
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
                             YV12_BUFFER_CONFIG *sd) {
-  YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+  YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
   if (cfg) {
     aom_yv12_copy_frame(cfg, sd);
     return 0;
@@ -2082,9 +2082,9 @@ int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
   }
 }
 
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_set_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
                            YV12_BUFFER_CONFIG *sd) {
-  YV12_BUFFER_CONFIG *cfg = get_vp10_ref_frame_buffer(cpi, ref_frame_flag);
+  YV12_BUFFER_CONFIG *cfg = get_av1_ref_frame_buffer(cpi, ref_frame_flag);
   if (cfg) {
     aom_yv12_copy_frame(sd, cfg);
     return 0;
@@ -2093,7 +2093,7 @@ int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
   }
 }
 
-int vp10_update_entropy(VP10_COMP *cpi, int update) {
+int av1_update_entropy(AV1_COMP *cpi, int update) {
   cpi->ext_refresh_frame_context = update;
   cpi->ext_refresh_frame_context_pending = 1;
   return 0;
@@ -2104,7 +2104,7 @@ int vp10_update_entropy(VP10_COMP *cpi, int update) {
 // as YUV 420. We simply use the top-left pixels of the UV buffers, since we do
 // not denoise the UV channels at this time. If ever we implement UV channel
 // denoising we will have to modify this.
-void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
+void av1_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
   uint8_t *src = s->y_buffer;
   int h = s->y_height;
 
@@ -2132,12 +2132,12 @@ void vp10_write_yuv_frame_420(YV12_BUFFER_CONFIG *s, FILE *f) {
 #endif
 
 #ifdef OUTPUT_YUV_REC
-void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
+void av1_write_yuv_rec_frame(AV1_COMMON *cm) {
   YV12_BUFFER_CONFIG *s = cm->frame_to_show;
   uint8_t *src = s->y_buffer;
   int h = cm->height;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (s->flags & YV12_FLAG_HIGHBITDEPTH) {
     uint16_t *src16 = CONVERT_TO_SHORTPTR(s->y_buffer);
 
@@ -2165,7 +2165,7 @@ void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
     fflush(yuv_rec_file);
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   do {
     fwrite(src, s->y_width, 1, yuv_rec_file);
@@ -2192,14 +2192,14 @@ void vp10_write_yuv_rec_frame(VP10_COMMON *cm) {
 }
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
                                                 YV12_BUFFER_CONFIG *dst,
                                                 int bd) {
 #else
 static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
                                                 YV12_BUFFER_CONFIG *dst) {
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   // TODO(dkovalev): replace YV12_BUFFER_CONFIG with aom_image_t
   int i;
   const uint8_t *const srcs[3] = { src->y_buffer, src->u_buffer,
@@ -2217,30 +2217,30 @@ static void scale_and_extend_frame_nonnormative(const YV12_BUFFER_CONFIG *src,
                                dst->uv_crop_height };
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
-      vp10_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
+      av1_highbd_resize_plane(srcs[i], src_heights[i], src_widths[i],
                                src_strides[i], dsts[i], dst_heights[i],
                                dst_widths[i], dst_strides[i], bd);
     } else {
-      vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+      av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
                         dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
     }
 #else
-    vp10_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
+    av1_resize_plane(srcs[i], src_heights[i], src_widths[i], src_strides[i],
                       dsts[i], dst_heights[i], dst_widths[i], dst_strides[i]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
   aom_extend_frame_borders(dst);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                    YV12_BUFFER_CONFIG *dst, int bd) {
 #else
 static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                    YV12_BUFFER_CONFIG *dst) {
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   const int src_w = src->y_crop_width;
   const int src_h = src->y_crop_height;
   const int dst_w = dst->y_crop_width;
@@ -2250,7 +2250,7 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
   const int src_strides[3] = { src->y_stride, src->uv_stride, src->uv_stride };
   uint8_t *const dsts[3] = { dst->y_buffer, dst->u_buffer, dst->v_buffer };
   const int dst_strides[3] = { dst->y_stride, dst->uv_stride, dst->uv_stride };
-  const InterpKernel *const kernel = vp10_filter_kernels[EIGHTTAP];
+  const InterpKernel *const kernel = av1_filter_kernels[EIGHTTAP];
   int x, y, i;
 
   for (y = 0; y < dst_h; y += 16) {
@@ -2266,7 +2266,7 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                  (x / factor) * src_w / dst_w;
         uint8_t *dst_ptr = dsts[i] + (y / factor) * dst_stride + (x / factor);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
           aom_highbd_convolve8(src_ptr, src_stride, dst_ptr, dst_stride,
                                kernel[x_q4 & 0xf], 16 * src_w / dst_w,
@@ -2283,7 +2283,7 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                       kernel[x_q4 & 0xf], 16 * src_w / dst_w,
                       kernel[y_q4 & 0xf], 16 * src_h / dst_h, 16 / factor,
                       16 / factor);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -2291,7 +2291,7 @@ static void scale_and_extend_frame(const YV12_BUFFER_CONFIG *src,
   aom_extend_frame_borders(dst);
 }
 
-static int scale_down(VP10_COMP *cpi, int q) {
+static int scale_down(AV1_COMP *cpi, int q) {
   RATE_CONTROL *const rc = &cpi->rc;
   GF_GROUP *const gf_group = &cpi->twopass.gf_group;
   int scale = 0;
@@ -2309,10 +2309,10 @@ static int scale_down(VP10_COMP *cpi, int q) {
 
 // Function to test for conditions that indicate we should loop
 // back and recode a frame.
-static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
+static int recode_loop_test(AV1_COMP *cpi, int high_limit, int low_limit,
                             int q, int maxq, int minq) {
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const int frame_is_kfgfarf = frame_is_kf_gf_arf(cpi);
   int force_recode = 0;
 
@@ -2342,8 +2342,8 @@ static int recode_loop_test(VP10_COMP *cpi, int high_limit, int low_limit,
   return force_recode;
 }
 
-void vp10_update_reference_frames(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_update_reference_frames(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   BufferPool *const pool = cm->buffer_pool;
 
   // At this point the new frame has been encoded.
@@ -2353,10 +2353,10 @@ void vp10_update_reference_frames(VP10_COMP *cpi) {
                cm->new_fb_idx);
     ref_cnt_fb(pool->frame_bufs, &cm->ref_frame_map[cpi->alt_fb_idx],
                cm->new_fb_idx);
-  } else if (vp10_preserve_existing_gf(cpi)) {
+  } else if (av1_preserve_existing_gf(cpi)) {
     // We have decided to preserve the previously existing golden frame as our
     // new ARF frame. However, in the short term in function
-    // vp10_bitstream.c::get_refresh_mask() we left it in the GF slot and, if
+    // bitstream.c::get_refresh_mask() we left it in the GF slot and, if
     // we're updating the GF with the current decoded frame, we save it to the
     // ARF slot instead.
     // We now have to update the ARF with the current frame and swap gld_fb_idx
@@ -2408,7 +2408,7 @@ void vp10_update_reference_frames(VP10_COMP *cpi) {
   }
 }
 
-static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
+static void loopfilter_frame(AV1_COMP *cpi, AV1_COMMON *cm) {
   MACROBLOCKD *xd = &cpi->td.mb.e_mbd;
   struct loopfilter *lf = &cm->lf;
   if (is_lossless_requested(&cpi->oxcf)) {
@@ -2420,7 +2420,7 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
 
     aom_usec_timer_start(&timer);
 
-    vp10_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
+    av1_pick_filter_level(cpi->Source, cpi, cpi->sf.lpf_pick);
 
     aom_usec_timer_mark(&timer);
     cpi->time_pick_lpf += aom_usec_timer_elapsed(&timer);
@@ -2428,20 +2428,20 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
 
   if (lf->filter_level > 0) {
     if (cpi->num_workers > 1)
-      vp10_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
+      av1_loop_filter_frame_mt(cm->frame_to_show, cm, xd->plane,
                                 lf->filter_level, 0, 0, cpi->workers,
                                 cpi->num_workers, &cpi->lf_row_sync);
     else
-      vp10_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
+      av1_loop_filter_frame(cm->frame_to_show, cm, xd, lf->filter_level, 0, 0);
   }
 
 #if CONFIG_DERING
   if (is_lossless_requested(&cpi->oxcf)) {
     cm->dering_level = 0;
   } else {
-    cm->dering_level = vp10_dering_search(cm->frame_to_show, cpi->Source, cm,
+    cm->dering_level = av1_dering_search(cm->frame_to_show, cpi->Source, cm,
                                           xd);
-    vp10_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
+    av1_dering_frame(cm->frame_to_show, cm, xd, cm->dering_level);
   }
 #endif  // CONFIG_DERING
 
@@ -2454,7 +2454,7 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
     // TODO(yaowu): investigate per-segment CLPF decision and
     // an optimal threshold, use 80 for now.
     for (i = 0; i < MAX_SEGMENTS; i++)
-      hq &= vp10_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
+      hq &= av1_get_qindex(&cm->seg, i, cm->base_qindex) < 80;
 
     if (!hq) {  // Don't try filter if the entire image is nearly losslessly
                 // encoded
@@ -2470,7 +2470,7 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
           get_sse(cpi->Source->v_buffer, cpi->Source->uv_stride,
                   cm->frame_to_show->v_buffer, cm->frame_to_show->uv_stride,
                   cpi->Source->uv_crop_width, cpi->Source->uv_crop_height);
-      vp10_clpf_frame(cm->frame_to_show, cm, xd);
+      av1_clpf_frame(cm->frame_to_show, cm, xd);
       after = get_sse(cpi->Source->y_buffer, cpi->Source->y_stride,
                       cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
                       cpi->Source->y_crop_width, cpi->Source->y_crop_height) +
@@ -2485,7 +2485,7 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
       before = get_sse(cpi->Source->y_buffer, cpi->Source->y_stride,
                        cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
                        cpi->Source->y_crop_width, cpi->Source->y_crop_height);
-      vp10_clpf_frame(cm->frame_to_show, cm, xd);
+      av1_clpf_frame(cm->frame_to_show, cm, xd);
       after = get_sse(cpi->Source->y_buffer, cpi->Source->y_stride,
                       cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
                       cpi->Source->y_crop_width, cpi->Source->y_crop_height);
@@ -2507,7 +2507,7 @@ static void loopfilter_frame(VP10_COMP *cpi, VP10_COMMON *cm) {
   aom_extend_frame_inner_borders(cm->frame_to_show);
 }
 
-static INLINE void alloc_frame_mvs(const VP10_COMMON *cm, int buffer_idx) {
+static INLINE void alloc_frame_mvs(const AV1_COMMON *cm, int buffer_idx) {
   RefCntBuffer *const new_fb_ptr = &cm->buffer_pool->frame_bufs[buffer_idx];
   if (new_fb_ptr->mvs == NULL || new_fb_ptr->mi_rows < cm->mi_rows ||
       new_fb_ptr->mi_cols < cm->mi_cols) {
@@ -2519,8 +2519,8 @@ static INLINE void alloc_frame_mvs(const VP10_COMMON *cm, int buffer_idx) {
   }
 }
 
-void vp10_scale_references(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+void av1_scale_references(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   MV_REFERENCE_FRAME ref_frame;
   const VPX_REFFRAME ref_mask[3] = { VPX_LAST_FLAG, VPX_GOLD_FLAG,
                                      VPX_ALT_FLAG };
@@ -2537,7 +2537,7 @@ void vp10_scale_references(VP10_COMP *cpi) {
         continue;
       }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (ref->y_crop_width != cm->width || ref->y_crop_height != cm->height) {
         RefCntBuffer *new_fb_ptr = NULL;
         int force_scaling = 0;
@@ -2579,7 +2579,7 @@ void vp10_scale_references(VP10_COMP *cpi) {
           cpi->scaled_ref_idx[ref_frame - 1] = new_fb;
           alloc_frame_mvs(cm, new_fb);
         }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       } else {
         const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
         RefCntBuffer *const buf = &pool->frame_bufs[buf_idx];
@@ -2594,8 +2594,8 @@ void vp10_scale_references(VP10_COMP *cpi) {
   }
 }
 
-static void release_scaled_references(VP10_COMP *cpi) {
-  VP10_COMMON *cm = &cpi->common;
+static void release_scaled_references(AV1_COMP *cpi) {
+  AV1_COMMON *cm = &cpi->common;
   int i;
   if (cpi->oxcf.pass == 0) {
     // Only release scaled references under certain conditions:
@@ -2640,8 +2640,8 @@ static void full_to_model_count(unsigned int *model_count,
   model_count[EOB_MODEL_TOKEN] = full_count[EOB_TOKEN];
 }
 
-static void full_to_model_counts(vp10_coeff_count_model *model_count,
-                                 vp10_coeff_count *full_count) {
+static void full_to_model_counts(av1_coeff_count_model *model_count,
+                                 av1_coeff_count *full_count) {
   int i, j, k, l;
 
   for (i = 0; i < PLANE_TYPES; ++i)
@@ -2652,14 +2652,14 @@ static void full_to_model_counts(vp10_coeff_count_model *model_count,
 }
 
 #if 0 && CONFIG_INTERNAL_STATS
-static void output_frame_level_debug_stats(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void output_frame_level_debug_stats(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   FILE *const f = fopen("tmp.stt", cm->current_video_frame ? "a" : "w");
   int64_t recon_err;
 
   aom_clear_system_state();
 
-  recon_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+  recon_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
 
   if (cpi->twopass.total_left_stats.coded_error != 0.0)
     fprintf(f, "%10u %dx%d  %10d %10d %d %d %10d %10d %10d %10d"
@@ -2686,12 +2686,12 @@ static void output_frame_level_debug_stats(VP10_COMP *cpi) {
         cpi->rc.total_target_vs_actual,
         (cpi->rc.starting_buffer_level - cpi->rc.bits_off_target),
         cpi->rc.total_actual_bits, cm->base_qindex,
-        vp10_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
-        (double)vp10_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
-        vp10_convert_qindex_to_q(cpi->twopass.active_worst_quality,
+        av1_convert_qindex_to_q(cm->base_qindex, cm->bit_depth),
+        (double)av1_dc_quant(cm->base_qindex, 0, cm->bit_depth) / 4.0,
+        av1_convert_qindex_to_q(cpi->twopass.active_worst_quality,
                                 cm->bit_depth),
         cpi->rc.avg_q,
-        vp10_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
+        av1_convert_qindex_to_q(cpi->oxcf.cq_level, cm->bit_depth),
         cpi->refresh_last_frame, cpi->refresh_golden_frame,
         cpi->refresh_alt_ref_frame, cm->frame_type, cpi->rc.gfu_boost,
         cpi->twopass.bits_left,
@@ -2722,12 +2722,12 @@ static void output_frame_level_debug_stats(VP10_COMP *cpi) {
 }
 #endif
 
-static void set_mv_search_params(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+static void set_mv_search_params(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   const unsigned int max_mv_def = VPXMIN(cm->width, cm->height);
 
   // Default based on max resolution.
-  cpi->mv_step_param = vp10_init_search_range(max_mv_def);
+  cpi->mv_step_param = av1_init_search_range(max_mv_def);
 
   if (cpi->sf.mv.auto_mv_step_size) {
     if (frame_is_intra_only(cm)) {
@@ -2739,7 +2739,7 @@ static void set_mv_search_params(VP10_COMP *cpi) {
         // Allow mv_steps to correspond to twice the max mv magnitude found
         // in the previous frame, capped by the default max_mv_magnitude based
         // on resolution.
-        cpi->mv_step_param = vp10_init_search_range(
+        cpi->mv_step_param = av1_init_search_range(
             VPXMIN(max_mv_def, 2 * cpi->max_mv_magnitude));
       }
       cpi->max_mv_magnitude = 0;
@@ -2747,26 +2747,26 @@ static void set_mv_search_params(VP10_COMP *cpi) {
   }
 }
 
-static void set_size_independent_vars(VP10_COMP *cpi) {
-  vp10_set_speed_features_framesize_independent(cpi);
-  vp10_set_rd_speed_thresholds(cpi);
-  vp10_set_rd_speed_thresholds_sub8x8(cpi);
+static void set_size_independent_vars(AV1_COMP *cpi) {
+  av1_set_speed_features_framesize_independent(cpi);
+  av1_set_rd_speed_thresholds(cpi);
+  av1_set_rd_speed_thresholds_sub8x8(cpi);
   cpi->common.interp_filter = cpi->sf.default_interp_filter;
 }
 
-static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
+static void set_size_dependent_vars(AV1_COMP *cpi, int *q, int *bottom_index,
                                     int *top_index) {
-  VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 
   // Setup variables that depend on the dimensions of the frame.
-  vp10_set_speed_features_framesize_dependent(cpi);
+  av1_set_speed_features_framesize_dependent(cpi);
 
   // Decide q and q bounds.
-  *q = vp10_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
+  *q = av1_rc_pick_q_and_bounds(cpi, bottom_index, top_index);
 
   if (!frame_is_intra_only(cm)) {
-    vp10_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
+    av1_set_high_precision_mv(cpi, (*q) < HIGH_PRECISION_MV_QTHRESH);
   }
 
   // Configure experimental use of segmentation for enhanced coding of
@@ -2777,30 +2777,30 @@ static void set_size_dependent_vars(VP10_COMP *cpi, int *q, int *bottom_index,
     configure_static_seg_features(cpi);
 }
 
-static void init_motion_estimation(VP10_COMP *cpi) {
+static void init_motion_estimation(AV1_COMP *cpi) {
   int y_stride = cpi->scaled_source.y_stride;
 
   if (cpi->sf.mv.search_method == NSTEP) {
-    vp10_init3smotion_compensation(&cpi->ss_cfg, y_stride);
+    av1_init3smotion_compensation(&cpi->ss_cfg, y_stride);
   } else if (cpi->sf.mv.search_method == DIAMOND) {
-    vp10_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
+    av1_init_dsmotion_compensation(&cpi->ss_cfg, y_stride);
   }
 }
 
-static void set_frame_size(VP10_COMP *cpi) {
+static void set_frame_size(AV1_COMP *cpi) {
   int ref_frame;
-  VP10_COMMON *const cm = &cpi->common;
-  VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
+  AV1EncoderConfig *const oxcf = &cpi->oxcf;
   MACROBLOCKD *const xd = &cpi->td.mb.e_mbd;
 
   if (oxcf->pass == 2 && oxcf->rc_mode == VPX_VBR &&
       ((oxcf->resize_mode == RESIZE_FIXED && cm->current_video_frame == 0) ||
        (oxcf->resize_mode == RESIZE_DYNAMIC && cpi->resize_pending))) {
-    vp10_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
+    av1_calculate_coded_size(cpi, &oxcf->scaled_frame_width,
                               &oxcf->scaled_frame_height);
 
     // There has been a change in frame size.
-    vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+    av1_set_size_literal(cpi, oxcf->scaled_frame_width,
                           oxcf->scaled_frame_height);
   }
 
@@ -2818,7 +2818,7 @@ static void set_frame_size(VP10_COMP *cpi) {
     }
     if (cpi->resize_pending != 0) {
       // There has been a change in frame size.
-      vp10_set_size_literal(cpi, oxcf->scaled_frame_width,
+      av1_set_size_literal(cpi, oxcf->scaled_frame_width,
                             oxcf->scaled_frame_height);
 
       // TODO(agrange) Scale cpi->max_mv_magnitude if frame-size has changed.
@@ -2827,7 +2827,7 @@ static void set_frame_size(VP10_COMP *cpi) {
   }
 
   if (oxcf->pass == 2) {
-    vp10_set_target_rate(cpi);
+    av1_set_target_rate(cpi);
   }
 
   alloc_frame_mvs(cm, cm->new_fb_idx);
@@ -2835,7 +2835,7 @@ static void set_frame_size(VP10_COMP *cpi) {
   // Reset the frame pointers to the current frame size.
   aom_realloc_frame_buffer(get_frame_new_buffer(cm), cm->width, cm->height,
                            cm->subsampling_x, cm->subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                            cm->use_highbitdepth,
 #endif
                            VPX_ENC_BORDER_IN_PIXELS, cm->byte_alignment, NULL,
@@ -2853,16 +2853,16 @@ static void set_frame_size(VP10_COMP *cpi) {
     if (buf_idx != INVALID_IDX) {
       YV12_BUFFER_CONFIG *const buf = &cm->buffer_pool->frame_bufs[buf_idx].buf;
       ref_buf->buf = buf;
-#if CONFIG_VPX_HIGHBITDEPTH
-      vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+      av1_setup_scale_factors_for_frame(
           &ref_buf->sf, buf->y_crop_width, buf->y_crop_height, cm->width,
           cm->height, (buf->flags & YV12_FLAG_HIGHBITDEPTH) ? 1 : 0);
 #else
-      vp10_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
+      av1_setup_scale_factors_for_frame(&ref_buf->sf, buf->y_crop_width,
                                          buf->y_crop_height, cm->width,
                                          cm->height);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
-      if (vp10_is_scaled(&ref_buf->sf)) aom_extend_frame_borders(buf);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+      if (av1_is_scaled(&ref_buf->sf)) aom_extend_frame_borders(buf);
     } else {
       ref_buf->buf = NULL;
     }
@@ -2871,8 +2871,8 @@ static void set_frame_size(VP10_COMP *cpi) {
   set_ref_ptrs(cm, xd, LAST_FRAME, LAST_FRAME);
 }
 
-static void encode_without_recode_loop(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void encode_without_recode_loop(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int q = 0, bottom_index = 0, top_index = 0;  // Dummy variables.
 
   aom_clear_system_state();
@@ -2885,28 +2885,28 @@ static void encode_without_recode_loop(VP10_COMP *cpi) {
       cpi->oxcf.resize_mode == RESIZE_DYNAMIC &&
       cpi->un_scaled_source->y_width == (cm->width << 1) &&
       cpi->un_scaled_source->y_height == (cm->height << 1)) {
-    cpi->Source = vp10_scale_if_required_fast(cm, cpi->un_scaled_source,
+    cpi->Source = av1_scale_if_required_fast(cm, cpi->un_scaled_source,
                                               &cpi->scaled_source);
     if (cpi->unscaled_last_source != NULL)
-      cpi->Last_Source = vp10_scale_if_required_fast(
+      cpi->Last_Source = av1_scale_if_required_fast(
           cm, cpi->unscaled_last_source, &cpi->scaled_last_source);
   } else {
     cpi->Source =
-        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+        av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
     if (cpi->unscaled_last_source != NULL)
-      cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
+      cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
                                                 &cpi->scaled_last_source);
   }
 
   if (frame_is_intra_only(cm) == 0) {
-    vp10_scale_references(cpi);
+    av1_scale_references(cpi);
   }
 
   set_size_independent_vars(cpi);
   set_size_dependent_vars(cpi, &q, &bottom_index, &top_index);
 
-  vp10_set_quantizer(cm, q);
-  vp10_set_variance_partition_thresholds(cpi, q);
+  av1_set_quantizer(cm, q);
+  av1_set_variance_partition_thresholds(cpi, q);
 
   setup_frame(cpi);
 
@@ -2914,22 +2914,22 @@ static void encode_without_recode_loop(VP10_COMP *cpi) {
   // Variance adaptive and in frame q adjustment experiments are mutually
   // exclusive.
   if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
-    vp10_vaq_frame_setup(cpi);
+    av1_vaq_frame_setup(cpi);
   } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
-    vp10_setup_in_frame_q_adj(cpi);
+    av1_setup_in_frame_q_adj(cpi);
   } else if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ) {
-    vp10_cyclic_refresh_setup(cpi);
+    av1_cyclic_refresh_setup(cpi);
   }
   apply_active_map(cpi);
 
   // transform / motion compensation build reconstruction frame
-  vp10_encode_frame(cpi);
+  av1_encode_frame(cpi);
 
   // Update some stats from cyclic refresh, and check if we should not update
   // golden reference, for 1 pass CBR.
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->frame_type != KEY_FRAME &&
       (cpi->oxcf.pass == 0 && cpi->oxcf.rc_mode == VPX_CBR))
-    vp10_cyclic_refresh_check_golden_update(cpi);
+    av1_cyclic_refresh_check_golden_update(cpi);
 
   // Update the skip mb flag probabilities based on the distribution
   // seen in the last encoder iteration.
@@ -2937,9 +2937,9 @@ static void encode_without_recode_loop(VP10_COMP *cpi) {
   aom_clear_system_state();
 }
 
-static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
+static void encode_with_recode_loop(AV1_COMP *cpi, size_t *size,
                                     uint8_t *dest) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int bottom_index, top_index;
   int loop_count = 0;
@@ -2979,39 +2979,39 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
 
     // Decide frame size bounds first time through.
     if (loop_count == 0) {
-      vp10_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
+      av1_rc_compute_frame_size_bounds(cpi, rc->this_frame_target,
                                         &frame_under_shoot_limit,
                                         &frame_over_shoot_limit);
     }
 
     cpi->Source =
-        vp10_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
+        av1_scale_if_required(cm, cpi->un_scaled_source, &cpi->scaled_source);
 
     if (cpi->unscaled_last_source != NULL)
-      cpi->Last_Source = vp10_scale_if_required(cm, cpi->unscaled_last_source,
+      cpi->Last_Source = av1_scale_if_required(cm, cpi->unscaled_last_source,
                                                 &cpi->scaled_last_source);
 
     if (frame_is_intra_only(cm) == 0) {
       if (loop_count > 0) {
         release_scaled_references(cpi);
       }
-      vp10_scale_references(cpi);
+      av1_scale_references(cpi);
     }
 
-    vp10_set_quantizer(cm, q);
+    av1_set_quantizer(cm, q);
 
     if (loop_count == 0) setup_frame(cpi);
 
     // Variance adaptive and in frame q adjustment experiments are mutually
     // exclusive.
     if (cpi->oxcf.aq_mode == VARIANCE_AQ) {
-      vp10_vaq_frame_setup(cpi);
+      av1_vaq_frame_setup(cpi);
     } else if (cpi->oxcf.aq_mode == COMPLEXITY_AQ) {
-      vp10_setup_in_frame_q_adj(cpi);
+      av1_setup_in_frame_q_adj(cpi);
     }
 
     // transform / motion compensation build reconstruction frame
-    vp10_encode_frame(cpi);
+    av1_encode_frame(cpi);
 
     // Update the skip mb flag probabilities based on the distribution
     // seen in the last encoder iteration.
@@ -3024,7 +3024,7 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
     // to recode.
     if (cpi->sf.recode_loop >= ALLOW_RECODE_KFARFGF) {
       save_coding_context(cpi);
-      vp10_pack_bitstream(cpi, dest, size);
+      av1_pack_bitstream(cpi, dest, size);
 
       rc->projected_frame_size = (int)(*size) << 3;
       restore_coding_context(cpi);
@@ -3043,15 +3043,15 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
         int64_t high_err_target = cpi->ambient_err;
         int64_t low_err_target = cpi->ambient_err >> 1;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
-          kf_err = vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+          kf_err = av1_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
         } else {
-          kf_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+          kf_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
         }
 #else
-        kf_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+        kf_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         // Prevent possible divide by zero error below for perfect KF
         kf_err += !kf_err;
@@ -3119,19 +3119,19 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
 
           if (undershoot_seen || loop_at_this_size > 1) {
             // Update rate_correction_factor unless
-            vp10_rc_update_rate_correction_factors(cpi);
+            av1_rc_update_rate_correction_factors(cpi);
 
             q = (q_high + q_low + 1) / 2;
           } else {
             // Update rate_correction_factor unless
-            vp10_rc_update_rate_correction_factors(cpi);
+            av1_rc_update_rate_correction_factors(cpi);
 
-            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+            q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
                                    VPXMAX(q_high, top_index));
 
             while (q < q_low && retries < 10) {
-              vp10_rc_update_rate_correction_factors(cpi);
-              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+              av1_rc_update_rate_correction_factors(cpi);
+              q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
                                      VPXMAX(q_high, top_index));
               retries++;
             }
@@ -3143,11 +3143,11 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
           q_high = q > q_low ? q - 1 : q_low;
 
           if (overshoot_seen || loop_at_this_size > 1) {
-            vp10_rc_update_rate_correction_factors(cpi);
+            av1_rc_update_rate_correction_factors(cpi);
             q = (q_high + q_low) / 2;
           } else {
-            vp10_rc_update_rate_correction_factors(cpi);
-            q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+            av1_rc_update_rate_correction_factors(cpi);
+            q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
                                    top_index);
             // Special case reset for qlow for constrained quality.
             // This should only trigger where there is very substantial
@@ -3158,8 +3158,8 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
             }
 
             while (q > q_high && retries < 10) {
-              vp10_rc_update_rate_correction_factors(cpi);
-              q = vp10_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
+              av1_rc_update_rate_correction_factors(cpi);
+              q = av1_rc_regulate_q(cpi, rc->this_frame_target, bottom_index,
                                      top_index);
               retries++;
             }
@@ -3193,7 +3193,7 @@ static void encode_with_recode_loop(VP10_COMP *cpi, size_t *size,
   } while (loop);
 }
 
-static int get_ref_frame_flags(const VP10_COMP *cpi) {
+static int get_ref_frame_flags(const AV1_COMP *cpi) {
   const int *const map = cpi->common.ref_frame_map;
   const int gold_is_last = map[cpi->gld_fb_idx] == map[cpi->lst_fb_idx];
   const int alt_is_last = map[cpi->alt_fb_idx] == map[cpi->lst_fb_idx];
@@ -3211,9 +3211,9 @@ static int get_ref_frame_flags(const VP10_COMP *cpi) {
   return flags;
 }
 
-static void set_ext_overrides(VP10_COMP *cpi) {
+static void set_ext_overrides(AV1_COMP *cpi) {
   // Overrides the defaults with the externally supplied values with
-  // vp10_update_reference() and vp10_update_entropy() calls
+  // av1_update_reference() and av1_update_entropy() calls
   // Note: The overrides are valid only for the next frame passed
   // to encode_frame_to_data_rate() function
   if (cpi->ext_refresh_frame_context_pending) {
@@ -3228,7 +3228,7 @@ static void set_ext_overrides(VP10_COMP *cpi) {
   }
 }
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
                                                 YV12_BUFFER_CONFIG *unscaled,
                                                 YV12_BUFFER_CONFIG *scaled) {
   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
@@ -3242,24 +3242,24 @@ YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
   }
 }
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
                                            YV12_BUFFER_CONFIG *unscaled,
                                            YV12_BUFFER_CONFIG *scaled) {
   if (cm->mi_cols * MI_SIZE != unscaled->y_width ||
       cm->mi_rows * MI_SIZE != unscaled->y_height) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     scale_and_extend_frame_nonnormative(unscaled, scaled, (int)cm->bit_depth);
 #else
     scale_and_extend_frame_nonnormative(unscaled, scaled);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return scaled;
   } else {
     return unscaled;
   }
 }
 
-static void set_arf_sign_bias(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_arf_sign_bias(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int arf_sign_bias;
 
   if ((cpi->oxcf.pass == 2) && cpi->multi_arf_allowed) {
@@ -3274,7 +3274,7 @@ static void set_arf_sign_bias(VP10_COMP *cpi) {
   cm->ref_frame_sign_bias[ALTREF_FRAME] = arf_sign_bias;
 }
 
-static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
+static int setup_interp_filter_search_mask(AV1_COMP *cpi) {
   INTERP_FILTER ifilter;
   int ref_total[MAX_REF_FRAMES] = { 0 };
   MV_REFERENCE_FRAME ref;
@@ -3299,11 +3299,11 @@ static int setup_interp_filter_search_mask(VP10_COMP *cpi) {
   return mask;
 }
 
-static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
+static void encode_frame_to_data_rate(AV1_COMP *cpi, size_t *size,
                                       uint8_t *dest,
                                       unsigned int *frame_flags) {
-  VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   struct segmentation *const seg = &cm->seg;
   TX_SIZE t;
 
@@ -3322,7 +3322,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
   // Set various flags etc to special state if it is a key frame.
   if (frame_is_intra_only(cm)) {
     // Reset the loop filter deltas and segmentation map.
-    vp10_reset_segment_features(cm);
+    av1_reset_segment_features(cm);
 
     // If segmentation is enabled force a map update for key frames.
     if (seg->enabled) {
@@ -3349,8 +3349,8 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
   // Never drop on key frame.
   if (oxcf->pass == 0 && oxcf->rc_mode == VPX_CBR &&
       cm->frame_type != KEY_FRAME) {
-    if (vp10_rc_drop_frame(cpi)) {
-      vp10_rc_postencode_update_drop_frame(cpi);
+    if (av1_rc_drop_frame(cpi)) {
+      av1_rc_postencode_update_drop_frame(cpi);
       ++cm->current_video_frame;
       return;
     }
@@ -3371,7 +3371,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
 
 #ifdef OUTPUT_YUV_SKINMAP
   if (cpi->common.current_video_frame > 1) {
-    vp10_compute_skin_map(cpi, yuv_skinmap_file);
+    av1_compute_skin_map(cpi, yuv_skinmap_file);
   }
 #endif
 
@@ -3379,16 +3379,16 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
   // fixed interval. Note the reconstruction error if it is the frame before
   // the force key frame
   if (cpi->rc.next_key_frame_forced && cpi->rc.frames_to_key == 1) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (cm->use_highbitdepth) {
       cpi->ambient_err =
-          vp10_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+          av1_highbd_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
     } else {
-      cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+      cpi->ambient_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
     }
 #else
-    cpi->ambient_err = vp10_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+    cpi->ambient_err = av1_get_y_sse(cpi->Source, get_frame_new_buffer(cm));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   // If the encoder forced a KEY_FRAME decision
@@ -3404,32 +3404,32 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
   loopfilter_frame(cpi, cm);
 
   // build the bitstream
-  vp10_pack_bitstream(cpi, dest, size);
+  av1_pack_bitstream(cpi, dest, size);
 
   if (cm->seg.update_map) update_reference_segmentation_map(cpi);
 
   if (frame_is_intra_only(cm) == 0) {
     release_scaled_references(cpi);
   }
-  vp10_update_reference_frames(cpi);
+  av1_update_reference_frames(cpi);
 
   for (t = TX_4X4; t <= TX_32X32; t++)
     full_to_model_counts(cpi->td.counts->coef[t],
                          cpi->td.rd_counts.coef_counts[t]);
 
   if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-    vp10_adapt_coef_probs(cm);
+    av1_adapt_coef_probs(cm);
 #if CONFIG_MISC_FIXES
-    vp10_adapt_intra_frame_probs(cm);
+    av1_adapt_intra_frame_probs(cm);
 #else
-    if (!frame_is_intra_only(cm)) vp10_adapt_intra_frame_probs(cm);
+    if (!frame_is_intra_only(cm)) av1_adapt_intra_frame_probs(cm);
 #endif
   }
 
   if (!frame_is_intra_only(cm)) {
     if (cm->refresh_frame_context == REFRESH_FRAME_CONTEXT_BACKWARD) {
-      vp10_adapt_inter_frame_probs(cm);
-      vp10_adapt_mv_probs(cm, cm->allow_high_precision_mv);
+      av1_adapt_inter_frame_probs(cm);
+      av1_adapt_mv_probs(cm, cm->allow_high_precision_mv);
     }
   }
 
@@ -3447,7 +3447,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
 
   cm->last_frame_type = cm->frame_type;
 
-  vp10_rc_postencode_update(cpi, *size);
+  av1_rc_postencode_update(cpi, *size);
 
 #if 0
   output_frame_level_debug_stats(cpi);
@@ -3474,7 +3474,7 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
   if (!cm->show_existing_frame) cm->last_show_frame = cm->show_frame;
 
   if (cm->show_frame) {
-    vp10_swap_mi_and_prev_mi(cm);
+    av1_swap_mi_and_prev_mi(cm);
     // Don't increment frame counters if this was an altref buffer
     // update not a real frame
     ++cm->current_video_frame;
@@ -3482,25 +3482,25 @@ static void encode_frame_to_data_rate(VP10_COMP *cpi, size_t *size,
   cm->prev_frame = cm->cur_frame;
 }
 
-static void Pass0Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass0Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
                         unsigned int *frame_flags) {
   if (cpi->oxcf.rc_mode == VPX_CBR) {
-    vp10_rc_get_one_pass_cbr_params(cpi);
+    av1_rc_get_one_pass_cbr_params(cpi);
   } else {
-    vp10_rc_get_one_pass_vbr_params(cpi);
+    av1_rc_get_one_pass_vbr_params(cpi);
   }
   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
 }
 
-static void Pass2Encode(VP10_COMP *cpi, size_t *size, uint8_t *dest,
+static void Pass2Encode(AV1_COMP *cpi, size_t *size, uint8_t *dest,
                         unsigned int *frame_flags) {
   cpi->allow_encode_breakout = ENCODE_BREAKOUT_ENABLED;
   encode_frame_to_data_rate(cpi, size, dest, frame_flags);
 
-  vp10_twopass_postencode_update(cpi);
+  av1_twopass_postencode_update(cpi);
 }
 
-static void init_ref_frame_bufs(VP10_COMMON *cm) {
+static void init_ref_frame_bufs(AV1_COMMON *cm) {
   int i;
   BufferPool *const pool = cm->buffer_pool;
   cm->new_fb_idx = INVALID_IDX;
@@ -3510,22 +3510,22 @@ static void init_ref_frame_bufs(VP10_COMMON *cm) {
   }
 }
 
-static void check_initial_width(VP10_COMP *cpi,
-#if CONFIG_VPX_HIGHBITDEPTH
+static void check_initial_width(AV1_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
                                 int use_highbitdepth,
 #endif
                                 int subsampling_x, int subsampling_y) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   if (!cpi->initial_width ||
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       cm->use_highbitdepth != use_highbitdepth ||
 #endif
       cm->subsampling_x != subsampling_x ||
       cm->subsampling_y != subsampling_y) {
     cm->subsampling_x = subsampling_x;
     cm->subsampling_y = subsampling_y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     cm->use_highbitdepth = use_highbitdepth;
 #endif
 
@@ -3541,27 +3541,27 @@ static void check_initial_width(VP10_COMP *cpi,
   }
 }
 
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
                            YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
                            int64_t end_time) {
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   struct aom_usec_timer timer;
   int res = 0;
   const int subsampling_x = sd->subsampling_x;
   const int subsampling_y = sd->subsampling_y;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int use_highbitdepth = sd->flags & YV12_FLAG_HIGHBITDEPTH;
   check_initial_width(cpi, use_highbitdepth, subsampling_x, subsampling_y);
 #else
   check_initial_width(cpi, subsampling_x, subsampling_y);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   aom_usec_timer_start(&timer);
 
-  if (vp10_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
-#if CONFIG_VPX_HIGHBITDEPTH
+  if (av1_lookahead_push(cpi->lookahead, sd, time_stamp, end_time,
+#if CONFIG_AOM_HIGHBITDEPTH
                           use_highbitdepth,
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
                           frame_flags))
     res = -1;
   aom_usec_timer_mark(&timer);
@@ -3583,8 +3583,8 @@ int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
   return res;
 }
 
-static int frame_is_reference(const VP10_COMP *cpi) {
-  const VP10_COMMON *cm = &cpi->common;
+static int frame_is_reference(const AV1_COMP *cpi) {
+  const AV1_COMMON *cm = &cpi->common;
 
   return cm->frame_type == KEY_FRAME || cpi->refresh_last_frame ||
          cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame ||
@@ -3593,7 +3593,7 @@ static int frame_is_reference(const VP10_COMP *cpi) {
          cm->seg.update_data;
 }
 
-static void adjust_frame_rate(VP10_COMP *cpi,
+static void adjust_frame_rate(AV1_COMP *cpi,
                               const struct lookahead_entry *source) {
   int64_t this_duration;
   int step = 0;
@@ -3614,7 +3614,7 @@ static void adjust_frame_rate(VP10_COMP *cpi,
 
   if (this_duration) {
     if (step) {
-      vp10_new_framerate(cpi, 10000000.0 / this_duration);
+      av1_new_framerate(cpi, 10000000.0 / this_duration);
     } else {
       // Average this frame's rate into the last second's average
       // frame rate. If we haven't seen 1 second yet, then average
@@ -3625,7 +3625,7 @@ static void adjust_frame_rate(VP10_COMP *cpi,
       avg_duration *= (interval - avg_duration + this_duration);
       avg_duration /= interval;
 
-      vp10_new_framerate(cpi, 10000000.0 / avg_duration);
+      av1_new_framerate(cpi, 10000000.0 / avg_duration);
     }
   }
   cpi->last_time_stamp_seen = source->ts_start;
@@ -3634,7 +3634,7 @@ static void adjust_frame_rate(VP10_COMP *cpi,
 
 // Returns 0 if this is not an alt ref else the offset of the source frame
 // used as the arf midpoint.
-static int get_arf_src_index(VP10_COMP *cpi) {
+static int get_arf_src_index(AV1_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   int arf_src_index = 0;
   if (is_altref_enabled(cpi)) {
@@ -3650,7 +3650,7 @@ static int get_arf_src_index(VP10_COMP *cpi) {
   return arf_src_index;
 }
 
-static void check_src_altref(VP10_COMP *cpi,
+static void check_src_altref(AV1_COMP *cpi,
                              const struct lookahead_entry *source) {
   RATE_CONTROL *const rc = &cpi->rc;
 
@@ -3674,7 +3674,7 @@ static void check_src_altref(VP10_COMP *cpi,
 }
 
 #if CONFIG_INTERNAL_STATS
-extern double vp10_get_blockiness(const unsigned char *img1, int img1_pitch,
+extern double av1_get_blockiness(const unsigned char *img1, int img1_pitch,
                                   const unsigned char *img2, int img2_pitch,
                                   int width, int height);
 
@@ -3688,11 +3688,11 @@ static void adjust_image_stat(double y, double u, double v, double all,
 }
 #endif  // CONFIG_INTERNAL_STATS
 
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
                              size_t *size, uint8_t *dest, int64_t *time_stamp,
                              int64_t *time_end, int flush) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
-  VP10_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1_COMMON *const cm = &cpi->common;
   BufferPool *const pool = cm->buffer_pool;
   RATE_CONTROL *const rc = &cpi->rc;
   struct aom_usec_timer cmptimer;
@@ -3704,7 +3704,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
 
   aom_usec_timer_start(&cmptimer);
 
-  vp10_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
+  av1_set_high_precision_mv(cpi, ALTREF_HIGH_PRECISION_MV);
 
   // Is multi-arf enabled.
   // Note that at the moment multi_arf is only configured for 2 pass VBR
@@ -3731,12 +3731,12 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   if (arf_src_index) {
     assert(arf_src_index <= rc->frames_to_key);
 
-    if ((source = vp10_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
+    if ((source = av1_lookahead_peek(cpi->lookahead, arf_src_index)) != NULL) {
       cpi->alt_ref_source = source;
 
       if (oxcf->arnr_max_frames > 0) {
         // Produce the filtered ARF frame.
-        vp10_temporal_filter(cpi, arf_src_index);
+        av1_temporal_filter(cpi, arf_src_index);
         aom_extend_frame_borders(&cpi->alt_ref_buffer);
         force_src_buffer = &cpi->alt_ref_buffer;
       }
@@ -3756,12 +3756,12 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   if (!source) {
     // Get last frame source.
     if (cm->current_video_frame > 0) {
-      if ((last_source = vp10_lookahead_peek(cpi->lookahead, -1)) == NULL)
+      if ((last_source = av1_lookahead_peek(cpi->lookahead, -1)) == NULL)
         return -1;
     }
 
     // Read in the source frame.
-    source = vp10_lookahead_pop(cpi->lookahead, flush);
+    source = av1_lookahead_pop(cpi->lookahead, flush);
 
     if (source != NULL) {
       cm->show_frame = 1;
@@ -3785,7 +3785,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   } else {
     *size = 0;
     if (flush && oxcf->pass == 1 && !cpi->twopass.first_pass_done) {
-      vp10_end_first_pass(cpi); /* get last stats packet */
+      av1_end_first_pass(cpi); /* get last stats packet */
       cpi->twopass.first_pass_done = 1;
     }
     return -1;
@@ -3830,7 +3830,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   cpi->frame_flags = *frame_flags;
 
   if (oxcf->pass == 2) {
-    vp10_rc_get_second_pass_params(cpi);
+    av1_rc_get_second_pass_params(cpi);
   } else if (oxcf->pass == 1) {
     set_frame_size(cpi);
   }
@@ -3847,7 +3847,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
 
   if (oxcf->pass == 1) {
     cpi->td.mb.e_mbd.lossless[0] = is_lossless_requested(oxcf);
-    vp10_first_pass(cpi, source);
+    av1_first_pass(cpi, source);
   } else if (oxcf->pass == 2) {
     Pass2Encode(cpi, size, dest, frame_flags);
   } else {
@@ -3886,12 +3886,12 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
         YV12_BUFFER_CONFIG *orig = cpi->Source;
         YV12_BUFFER_CONFIG *recon = cpi->common.frame_to_show;
         PSNR_STATS psnr;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         calc_highbd_psnr(orig, recon, &psnr, cpi->td.mb.e_mbd.bd,
                          cpi->oxcf.input_bit_depth);
 #else
         calc_psnr(orig, recon, &psnr);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         adjust_image_stat(psnr.psnr[1], psnr.psnr[2], psnr.psnr[3],
                           psnr.psnr[0], &cpi->psnr);
@@ -3903,7 +3903,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
           double frame_ssim2 = 0, weight = 0;
           aom_clear_system_state();
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (cm->use_highbitdepth) {
             frame_ssim2 =
                 aom_highbd_calc_ssim(orig, recon, &weight, (int)cm->bit_depth);
@@ -3912,7 +3912,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
           }
 #else
           frame_ssim2 = aom_calc_ssim(orig, recon, &weight);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
           cpi->worst_ssim = VPXMIN(cpi->worst_ssim, frame_ssim2);
           cpi->summed_quality += frame_ssim2 * weight;
@@ -3932,11 +3932,11 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
         }
       }
       if (cpi->b_calculate_blockiness) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (!cm->use_highbitdepth)
 #endif
         {
-          double frame_blockiness = vp10_get_blockiness(
+          double frame_blockiness = av1_get_blockiness(
               cpi->Source->y_buffer, cpi->Source->y_stride,
               cm->frame_to_show->y_buffer, cm->frame_to_show->y_stride,
               cpi->Source->y_width, cpi->Source->y_height);
@@ -3947,7 +3947,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
       }
 
       if (cpi->b_calculate_consistency) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (!cm->use_highbitdepth)
 #endif
         {
@@ -3969,7 +3969,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
 
       if (cpi->b_calculate_ssimg) {
         double y, u, v, frame_all;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (cm->use_highbitdepth) {
           frame_all = aom_highbd_calc_ssimg(cpi->Source, cm->frame_to_show, &y,
                                             &u, &v, (int)cm->bit_depth);
@@ -3979,10 +3979,10 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
         }
 #else
         frame_all = aom_calc_ssimg(cpi->Source, cm->frame_to_show, &y, &u, &v);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         adjust_image_stat(y, u, v, frame_all, &cpi->ssimg);
       }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (!cm->use_highbitdepth)
 #endif
       {
@@ -3992,7 +3992,7 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
         adjust_image_stat(y, u, v, frame_all, &cpi->fastssim);
         /* TODO(JBB): add 10/12 bit support */
       }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (!cm->use_highbitdepth)
 #endif
       {
@@ -4008,8 +4008,8 @@ int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
   return 0;
 }
 
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
-  VP10_COMMON *cm = &cpi->common;
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
+  AV1_COMMON *cm = &cpi->common;
 
   if (!cm->show_frame) {
     return -1;
@@ -4030,9 +4030,9 @@ int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest) {
   }
 }
 
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+int av1_set_internal_size(AV1_COMP *cpi, VPX_SCALING horiz_mode,
                            VPX_SCALING vert_mode) {
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   int hr = 0, hs = 0, vr = 0, vs = 0;
 
   if (horiz_mode > ONETWO || vert_mode > ONETWO) return -1;
@@ -4051,14 +4051,14 @@ int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
   return 0;
 }
 
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
                           unsigned int height) {
-  VP10_COMMON *cm = &cpi->common;
-#if CONFIG_VPX_HIGHBITDEPTH
+  AV1_COMMON *cm = &cpi->common;
+#if CONFIG_AOM_HIGHBITDEPTH
   check_initial_width(cpi, cm->use_highbitdepth, 1, 1);
 #else
   check_initial_width(cpi, 1, 1);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if (width) {
     cm->width = width;
@@ -4083,7 +4083,7 @@ int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
   return 0;
 }
 
-int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
+int64_t av1_get_y_sse(const YV12_BUFFER_CONFIG *a,
                        const YV12_BUFFER_CONFIG *b) {
   assert(a->y_crop_width == b->y_crop_width);
   assert(a->y_crop_height == b->y_crop_height);
@@ -4092,8 +4092,8 @@ int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
                  a->y_crop_width, a->y_crop_height);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
                               const YV12_BUFFER_CONFIG *b) {
   assert(a->y_crop_width == b->y_crop_width);
   assert(a->y_crop_height == b->y_crop_height);
@@ -4103,11 +4103,11 @@ int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
   return highbd_get_sse(a->y_buffer, a->y_stride, b->y_buffer, b->y_stride,
                         a->y_crop_width, a->y_crop_height);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-int vp10_get_quantizer(VP10_COMP *cpi) { return cpi->common.base_qindex; }
+int av1_get_quantizer(AV1_COMP *cpi) { return cpi->common.base_qindex; }
 
-void vp10_apply_encoding_flags(VP10_COMP *cpi, aom_enc_frame_flags_t flags) {
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags) {
   if (flags &
       (VP8_EFLAG_NO_REF_LAST | VP8_EFLAG_NO_REF_GF | VP8_EFLAG_NO_REF_ARF)) {
     int ref = 7;
@@ -4118,7 +4118,7 @@ void vp10_apply_encoding_flags(VP10_COMP *cpi, aom_enc_frame_flags_t flags) {
 
     if (flags & VP8_EFLAG_NO_REF_ARF) ref ^= VPX_ALT_FLAG;
 
-    vp10_use_as_reference(cpi, ref);
+    av1_use_as_reference(cpi, ref);
   }
 
   if (flags &
@@ -4132,10 +4132,10 @@ void vp10_apply_encoding_flags(VP10_COMP *cpi, aom_enc_frame_flags_t flags) {
 
     if (flags & VP8_EFLAG_NO_UPD_ARF) upd ^= VPX_ALT_FLAG;
 
-    vp10_update_reference(cpi, upd);
+    av1_update_reference(cpi, upd);
   }
 
   if (flags & VP8_EFLAG_NO_UPD_ENTROPY) {
-    vp10_update_entropy(cpi, 0);
+    av1_update_entropy(cpi, 0);
   }
 }
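
For orientation, the renamed top-level entry points in this file keep their libvpx-era shape: a caller pushes a raw frame in and then pulls compressed data out. The sketch below is not part of the patch; it only illustrates the two prototypes touched above, and it assumes the AV1_COMP instance, the raw YV12 frame, and the output buffer were set up elsewhere (e.g. via av1_create_compressor(), declared further down in encoder.h).

    /* Minimal per-frame driver sketch for the renamed entry points.
     * Assumptions: `cpi` comes from av1_create_compressor(), `sd` holds a
     * raw frame, and `dest` is large enough for one compressed frame. */
    #include <stddef.h>
    #include <stdint.h>
    #include "av1/encoder/encoder.h"

    static int encode_one_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *sd,
                                int64_t pts, int64_t pts_end, uint8_t *dest,
                                size_t *out_size) {
      unsigned int flags = 0;
      int64_t time_stamp = 0, time_end = 0;

      /* The encoder copies the raw frame, so `sd` may be reused afterwards. */
      if (av1_receive_raw_frame(cpi, flags, sd, pts, pts_end)) return -1;

      /* Returns nonzero when no compressed frame is available yet. */
      return av1_get_compressed_data(cpi, &flags, out_size, dest, &time_stamp,
                                     &time_end, /*flush=*/0);
    }
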
diff --git a/av1/encoder/encoder.h b/av1/encoder/encoder.h
index 3af900d9b84fd4b5bde1894b566570b6c37515e8..81ef833987dd5ee37300f9f58d014024b4a6d346 100644
--- a/av1/encoder/encoder.h
+++ b/av1/encoder/encoder.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_ENCODER_H_
-#define VP10_ENCODER_ENCODER_H_
+#ifndef AV1_ENCODER_ENCODER_H_
+#define AV1_ENCODER_ENCODER_H_
 
 #include <stdio.h>
 
@@ -117,7 +117,7 @@ typedef enum {
   RESIZE_DYNAMIC = 2  // Coded size of each frame is determined by the codec.
 } RESIZE_TYPE;
 
-typedef struct VP10EncoderConfig {
+typedef struct AV1EncoderConfig {
   BITSTREAM_PROFILE profile;
   aom_bit_depth_t bit_depth;     // Codec bit-depth.
   int width;                     // width of data passed to the compressor
@@ -226,16 +226,16 @@ typedef struct VP10EncoderConfig {
 
   aom_tune_metric tuning;
   aom_tune_content content;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int use_highbitdepth;
 #endif
   aom_color_space_t color_space;
   int color_range;
   int render_width;
   int render_height;
-} VP10EncoderConfig;
+} AV1EncoderConfig;
 
-static INLINE int is_lossless_requested(const VP10EncoderConfig *cfg) {
+static INLINE int is_lossless_requested(const AV1EncoderConfig *cfg) {
   return cfg->best_allowed_q == 0 && cfg->worst_allowed_q == 0;
 }
 
@@ -247,7 +247,7 @@ typedef struct TileDataEnc {
 } TileDataEnc;
 
 typedef struct RD_COUNTS {
-  vp10_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
+  av1_coeff_count coef_counts[TX_SIZES][PLANE_TYPES];
   int64_t comp_pred_diff[REFERENCE_MODES];
   int64_t filter_diff[SWITCHABLE_FILTER_CONTEXTS];
   int m_search_count;
@@ -279,14 +279,14 @@ typedef struct IMAGE_STAT {
   double worst;
 } ImageStat;
 
-typedef struct VP10_COMP {
+typedef struct AV1_COMP {
   QUANTS quants;
   ThreadData td;
   MB_MODE_INFO_EXT *mbmi_ext_base;
   DECLARE_ALIGNED(16, int16_t, y_dequant[QINDEX_RANGE][8]);
   DECLARE_ALIGNED(16, int16_t, uv_dequant[QINDEX_RANGE][8]);
-  VP10_COMMON common;
-  VP10EncoderConfig oxcf;
+  AV1_COMMON common;
+  AV1EncoderConfig oxcf;
   struct lookahead_ctx *lookahead;
   struct lookahead_entry *alt_ref_source;
 
@@ -376,8 +376,8 @@ typedef struct VP10_COMP {
   ActiveMap active_map;
 
   fractional_mv_step_fp *find_fractional_mv_step;
-  vp10_full_search_fn_t full_search_sad;
-  vp10_diamond_search_fn_t diamond_search_sad;
+  av1_full_search_fn_t full_search_sad;
+  av1_diamond_search_fn_t diamond_search_sad;
   aom_variance_fn_ptr_t fn_ptr[BLOCK_SIZES];
   uint64_t time_receive_data;
   uint64_t time_compress_data;
@@ -484,59 +484,59 @@ typedef struct VP10_COMP {
   int num_workers;
   VPxWorker *workers;
   struct EncWorkerData *tile_thr_data;
-  VP10LfSync lf_row_sync;
-} VP10_COMP;
+  AV1LfSync lf_row_sync;
+} AV1_COMP;
 
-void vp10_initialize_enc(void);
+void av1_initialize_enc(void);
 
-struct VP10_COMP *vp10_create_compressor(VP10EncoderConfig *oxcf,
+struct AV1_COMP *av1_create_compressor(AV1EncoderConfig *oxcf,
                                          BufferPool *const pool);
-void vp10_remove_compressor(VP10_COMP *cpi);
+void av1_remove_compressor(AV1_COMP *cpi);
 
-void vp10_change_config(VP10_COMP *cpi, const VP10EncoderConfig *oxcf);
+void av1_change_config(AV1_COMP *cpi, const AV1EncoderConfig *oxcf);
 
 // Receive a frame's worth of data. The caller can assume that a copy of
 // this frame is made and not just a copy of the pointer.
-int vp10_receive_raw_frame(VP10_COMP *cpi, unsigned int frame_flags,
+int av1_receive_raw_frame(AV1_COMP *cpi, unsigned int frame_flags,
                            YV12_BUFFER_CONFIG *sd, int64_t time_stamp,
                            int64_t end_time_stamp);
 
-int vp10_get_compressed_data(VP10_COMP *cpi, unsigned int *frame_flags,
+int av1_get_compressed_data(AV1_COMP *cpi, unsigned int *frame_flags,
                              size_t *size, uint8_t *dest, int64_t *time_stamp,
                              int64_t *time_end, int flush);
 
-int vp10_get_preview_raw_frame(VP10_COMP *cpi, YV12_BUFFER_CONFIG *dest);
+int av1_get_preview_raw_frame(AV1_COMP *cpi, YV12_BUFFER_CONFIG *dest);
 
-int vp10_use_as_reference(VP10_COMP *cpi, int ref_frame_flags);
+int av1_use_as_reference(AV1_COMP *cpi, int ref_frame_flags);
 
-void vp10_update_reference(VP10_COMP *cpi, int ref_frame_flags);
+void av1_update_reference(AV1_COMP *cpi, int ref_frame_flags);
 
-int vp10_copy_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_copy_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
                             YV12_BUFFER_CONFIG *sd);
 
-int vp10_set_reference_enc(VP10_COMP *cpi, VPX_REFFRAME ref_frame_flag,
+int av1_set_reference_enc(AV1_COMP *cpi, VPX_REFFRAME ref_frame_flag,
                            YV12_BUFFER_CONFIG *sd);
 
-int vp10_update_entropy(VP10_COMP *cpi, int update);
+int av1_update_entropy(AV1_COMP *cpi, int update);
 
-int vp10_set_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_set_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
 
-int vp10_get_active_map(VP10_COMP *cpi, unsigned char *map, int rows, int cols);
+int av1_get_active_map(AV1_COMP *cpi, unsigned char *map, int rows, int cols);
 
-int vp10_set_internal_size(VP10_COMP *cpi, VPX_SCALING horiz_mode,
+int av1_set_internal_size(AV1_COMP *cpi, VPX_SCALING horiz_mode,
                            VPX_SCALING vert_mode);
 
-int vp10_set_size_literal(VP10_COMP *cpi, unsigned int width,
+int av1_set_size_literal(AV1_COMP *cpi, unsigned int width,
                           unsigned int height);
 
-int vp10_get_quantizer(struct VP10_COMP *cpi);
+int av1_get_quantizer(struct AV1_COMP *cpi);
 
-static INLINE int frame_is_kf_gf_arf(const VP10_COMP *cpi) {
+static INLINE int frame_is_kf_gf_arf(const AV1_COMP *cpi) {
   return frame_is_intra_only(&cpi->common) || cpi->refresh_alt_ref_frame ||
          (cpi->refresh_golden_frame && !cpi->rc.is_src_frame_alt_ref);
 }
 
-static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi,
+static INLINE int get_ref_frame_map_idx(const AV1_COMP *cpi,
                                         MV_REFERENCE_FRAME ref_frame) {
   if (ref_frame == LAST_FRAME) {
     return cpi->lst_fb_idx;
@@ -547,16 +547,16 @@ static INLINE int get_ref_frame_map_idx(const VP10_COMP *cpi,
   }
 }
 
-static INLINE int get_ref_frame_buf_idx(const VP10_COMP *const cpi,
+static INLINE int get_ref_frame_buf_idx(const AV1_COMP *const cpi,
                                         int ref_frame) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int map_idx = get_ref_frame_map_idx(cpi, ref_frame);
   return (map_idx != INVALID_IDX) ? cm->ref_frame_map[map_idx] : INVALID_IDX;
 }
 
 static INLINE YV12_BUFFER_CONFIG *get_ref_frame_buffer(
-    VP10_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+    AV1_COMP *cpi, MV_REFERENCE_FRAME ref_frame) {
+  AV1_COMMON *const cm = &cpi->common;
   const int buf_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   return buf_idx != INVALID_IDX ? &cm->buffer_pool->frame_bufs[buf_idx].buf
                                 : NULL;
@@ -580,37 +580,37 @@ static INLINE int allocated_tokens(TileInfo tile) {
   return get_token_alloc(tile_mb_rows, tile_mb_cols);
 }
 
-int64_t vp10_get_y_sse(const YV12_BUFFER_CONFIG *a,
+int64_t av1_get_y_sse(const YV12_BUFFER_CONFIG *a,
                        const YV12_BUFFER_CONFIG *b);
-#if CONFIG_VPX_HIGHBITDEPTH
-int64_t vp10_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_get_y_sse(const YV12_BUFFER_CONFIG *a,
                               const YV12_BUFFER_CONFIG *b);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_alloc_compressor_data(VP10_COMP *cpi);
+void av1_alloc_compressor_data(AV1_COMP *cpi);
 
-void vp10_scale_references(VP10_COMP *cpi);
+void av1_scale_references(AV1_COMP *cpi);
 
-void vp10_update_reference_frames(VP10_COMP *cpi);
+void av1_update_reference_frames(AV1_COMP *cpi);
 
-void vp10_set_high_precision_mv(VP10_COMP *cpi, int allow_high_precision_mv);
+void av1_set_high_precision_mv(AV1_COMP *cpi, int allow_high_precision_mv);
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required_fast(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required_fast(AV1_COMMON *cm,
                                                 YV12_BUFFER_CONFIG *unscaled,
                                                 YV12_BUFFER_CONFIG *scaled);
 
-YV12_BUFFER_CONFIG *vp10_scale_if_required(VP10_COMMON *cm,
+YV12_BUFFER_CONFIG *av1_scale_if_required(AV1_COMMON *cm,
                                            YV12_BUFFER_CONFIG *unscaled,
                                            YV12_BUFFER_CONFIG *scaled);
 
-void vp10_apply_encoding_flags(VP10_COMP *cpi, aom_enc_frame_flags_t flags);
+void av1_apply_encoding_flags(AV1_COMP *cpi, aom_enc_frame_flags_t flags);
 
-static INLINE int is_altref_enabled(const VP10_COMP *const cpi) {
+static INLINE int is_altref_enabled(const AV1_COMP *const cpi) {
   return cpi->oxcf.mode != REALTIME && cpi->oxcf.lag_in_frames > 0 &&
          cpi->oxcf.enable_auto_arf;
 }
 
-static INLINE void set_ref_ptrs(VP10_COMMON *cm, MACROBLOCKD *xd,
+static INLINE void set_ref_ptrs(AV1_COMMON *cm, MACROBLOCKD *xd,
                                 MV_REFERENCE_FRAME ref0,
                                 MV_REFERENCE_FRAME ref1) {
   xd->block_refs[0] =
@@ -623,11 +623,11 @@ static INLINE int get_chessboard_index(const int frame_index) {
   return frame_index & 0x1;
 }
 
-static INLINE int *cond_cost_list(const struct VP10_COMP *cpi, int *cost_list) {
+static INLINE int *cond_cost_list(const struct AV1_COMP *cpi, int *cost_list) {
   return cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL;
 }
 
-void vp10_new_framerate(VP10_COMP *cpi, double framerate);
+void av1_new_framerate(AV1_COMP *cpi, double framerate);
 
 #define LAYER_IDS_TO_IDX(sl, tl, num_tl) ((sl) * (num_tl) + (tl))
 
@@ -635,4 +635,4 @@ void vp10_new_framerate(VP10_COMP *cpi, double framerate);
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ENCODER_H_
+#endif  // AV1_ENCODER_ENCODER_H_
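
The inline reference-frame helpers renamed above chain a named reference through the frame map into the buffer pool. A small illustrative sketch follows (an example built on the declarations in this header, not code from the patch; `cpi` is assumed to be a fully initialised encoder):

    #include "av1/encoder/encoder.h"

    /* Resolve LAST_FRAME to its reconstruction buffer, or NULL when the
     * corresponding slot is INVALID_IDX. Internally this is
     * get_ref_frame_map_idx() -> cm->ref_frame_map[] ->
     * buffer_pool->frame_bufs[].buf, as defined in the header above. */
    static YV12_BUFFER_CONFIG *last_frame_buffer(AV1_COMP *cpi) {
      return get_ref_frame_buffer(cpi, LAST_FRAME);
    }
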
diff --git a/av1/encoder/ethread.c b/av1/encoder/ethread.c
index 63956c4c41a37716abc8f23c1cfee87a9f533fda..371bf25983334a93091ade3d14f17c6f39c8677e 100644
--- a/av1/encoder/ethread.c
+++ b/av1/encoder/ethread.c
@@ -38,8 +38,8 @@ static void accumulate_rd_opt(ThreadData *td, ThreadData *td_t) {
 }
 
 static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
-  VP10_COMP *const cpi = thread_data->cpi;
-  const VP10_COMMON *const cm = &cpi->common;
+  AV1_COMP *const cpi = thread_data->cpi;
+  const AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
   const int tile_rows = 1 << cm->log2_tile_rows;
   int t;
@@ -51,20 +51,20 @@ static int enc_worker_hook(EncWorkerData *const thread_data, void *unused) {
     int tile_row = t / tile_cols;
     int tile_col = t % tile_cols;
 
-    vp10_encode_tile(cpi, thread_data->td, tile_row, tile_col);
+    av1_encode_tile(cpi, thread_data->td, tile_row, tile_col);
   }
 
   return 0;
 }
 
-void vp10_encode_tiles_mt(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_encode_tiles_mt(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   const int tile_cols = 1 << cm->log2_tile_cols;
   const VPxWorkerInterface *const winterface = aom_get_worker_interface();
   const int num_workers = VPXMIN(cpi->oxcf.max_threads, tile_cols);
   int i;
 
-  vp10_init_tile_data(cpi);
+  av1_init_tile_data(cpi);
 
   // Only run once to create threads and allocate thread data.
   if (cpi->num_workers == 0) {
@@ -89,12 +89,12 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) {
         // Allocate thread data.
         CHECK_MEM_ERROR(cm, thread_data->td,
                         aom_memalign(32, sizeof(*thread_data->td)));
-        vp10_zero(*thread_data->td);
+        av1_zero(*thread_data->td);
 
         // Set up pc_tree.
         thread_data->td->leaf_tree = NULL;
         thread_data->td->pc_tree = NULL;
-        vp10_setup_pc_tree(cm, thread_data->td);
+        av1_setup_pc_tree(cm, thread_data->td);
 
         // Allocate frame counters in thread data.
         CHECK_MEM_ERROR(cm, thread_data->td->counts,
@@ -160,7 +160,7 @@ void vp10_encode_tiles_mt(VP10_COMP *cpi) {
 
     // Accumulate counters.
     if (i < cpi->num_workers - 1) {
-      vp10_accumulate_frame_counts(cm, thread_data->td->counts, 0);
+      av1_accumulate_frame_counts(cm, thread_data->td->counts, 0);
       accumulate_rd_opt(&cpi->td, thread_data->td);
     }
   }
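
The multi-threaded tile encoder above numbers tiles row-major and caps the worker count at the number of tile columns. A standalone sketch of that arithmetic (plain C mirroring the expressions in enc_worker_hook() and av1_encode_tiles_mt(), not taken verbatim from the patch):

    /* Decompose a linear tile index the same way enc_worker_hook() does. */
    static void tile_rc(int t, int tile_cols, int *tile_row, int *tile_col) {
      *tile_row = t / tile_cols;
      *tile_col = t % tile_cols;
    }

    /* Worker count used by av1_encode_tiles_mt():
     * VPXMIN(cpi->oxcf.max_threads, tile_cols), written out without the
     * macro so the sketch stays self-contained. */
    static int num_tile_workers(int max_threads, int tile_cols) {
      return max_threads < tile_cols ? max_threads : tile_cols;
    }
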
diff --git a/av1/encoder/ethread.h b/av1/encoder/ethread.h
index 7403bd34c7ed4a6a07fd8302ac80669dbb143c47..6c30a3e5cf6e6d1c2aed2e9980c6815e12e9488e 100644
--- a/av1/encoder/ethread.h
+++ b/av1/encoder/ethread.h
@@ -9,26 +9,26 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_ETHREAD_H_
-#define VP10_ENCODER_ETHREAD_H_
+#ifndef AV1_ENCODER_ETHREAD_H_
+#define AV1_ENCODER_ETHREAD_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct ThreadData;
 
 typedef struct EncWorkerData {
-  struct VP10_COMP *cpi;
+  struct AV1_COMP *cpi;
   struct ThreadData *td;
   int start;
 } EncWorkerData;
 
-void vp10_encode_tiles_mt(struct VP10_COMP *cpi);
+void av1_encode_tiles_mt(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_ETHREAD_H_
+#endif  // AV1_ENCODER_ETHREAD_H_
diff --git a/av1/encoder/extend.c b/av1/encoder/extend.c
index efb6515cd3a1915f5385109cd3268fd1906cd147..c7aaa498cbd5e35451b7e8b5461829b63d401327 100644
--- a/av1/encoder/extend.c
+++ b/av1/encoder/extend.c
@@ -57,7 +57,7 @@ static void copy_and_extend_plane(const uint8_t *src, int src_pitch,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
                                          uint8_t *dst8, int dst_pitch, int w,
                                          int h, int extend_top, int extend_left,
@@ -100,9 +100,9 @@ static void highbd_copy_and_extend_plane(const uint8_t *src8, int src_pitch,
     dst_ptr2 += dst_pitch;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                 YV12_BUFFER_CONFIG *dst) {
   // Extend src frame in buffer
   // Altref filtering assumes 16 pixel extension
@@ -124,7 +124,7 @@ void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
   const int eb_uv = eb_y >> uv_height_subsampling;
   const int er_uv = er_y >> uv_width_subsampling;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (src->flags & YV12_FLAG_HIGHBITDEPTH) {
     highbd_copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
                                  dst->y_stride, src->y_crop_width,
@@ -139,7 +139,7 @@ void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
         src->uv_crop_width, src->uv_crop_height, et_uv, el_uv, eb_uv, er_uv);
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   copy_and_extend_plane(src->y_buffer, src->y_stride, dst->y_buffer,
                         dst->y_stride, src->y_crop_width, src->y_crop_height,
@@ -154,7 +154,7 @@ void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                         et_uv, el_uv, eb_uv, er_uv);
 }
 
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
                                           YV12_BUFFER_CONFIG *dst, int srcy,
                                           int srcx, int srch, int srcw) {
   // If the side is not touching the boundary then don't extend.
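
Typical callers of the two renamed copy-and-extend helpers pass either a whole frame (e.g. ahead of altref temporal filtering, which expects the 16-pixel border extension noted above) or a sub-rectangle of one. A minimal usage sketch, assuming the destination buffer was allocated elsewhere with matching dimensions:

    #include "av1/encoder/extend.h"

    /* Copy all planes of `src` into `dst` and extend the borders. */
    static void copy_whole_frame(const YV12_BUFFER_CONFIG *src,
                                 YV12_BUFFER_CONFIG *dst) {
      av1_copy_and_extend_frame(src, dst);
    }

    /* Copy only an h x w region starting at (row, col); sides that do not
     * touch the frame boundary are not extended. */
    static void copy_region(const YV12_BUFFER_CONFIG *src,
                            YV12_BUFFER_CONFIG *dst, int row, int col, int h,
                            int w) {
      av1_copy_and_extend_frame_with_rect(src, dst, row, col, h, w);
    }
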
diff --git a/av1/encoder/extend.h b/av1/encoder/extend.h
index ba94a6df373590874a3b5a926bd525f9abd07f1d..ccd86549b882b5f39fa3a658ba8d0fdceadf9b5c 100644
--- a/av1/encoder/extend.h
+++ b/av1/encoder/extend.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_EXTEND_H_
-#define VP10_ENCODER_EXTEND_H_
+#ifndef AV1_ENCODER_EXTEND_H_
+#define AV1_ENCODER_EXTEND_H_
 
 #include "aom_scale/yv12config.h"
 #include "aom/aom_integer.h"
@@ -19,14 +19,14 @@
 extern "C" {
 #endif
 
-void vp10_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame(const YV12_BUFFER_CONFIG *src,
                                 YV12_BUFFER_CONFIG *dst);
 
-void vp10_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
+void av1_copy_and_extend_frame_with_rect(const YV12_BUFFER_CONFIG *src,
                                           YV12_BUFFER_CONFIG *dst, int srcy,
                                           int srcx, int srch, int srcw);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_EXTEND_H_
+#endif  // AV1_ENCODER_EXTEND_H_
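
The copy-and-extend helpers changed above replicate the outermost source pixels into the destination frame's border (the altref temporal filter assumes a 16-pixel extension). As a reading aid, here is a minimal sketch of that edge replication for a single 8-bit plane; the function name, the ext parameter, and the assumption that the buffer was allocated with ext border pixels on every side are illustrative, not part of this patch.

#include <stdint.h>
#include <string.h>

/* Sketch only: replicate the edge pixels of a w x h plane into a border of
 * `ext` pixels on each side. `buf` points at the top-left visible pixel and
 * the allocation is assumed to include the border rows/columns. */
static void extend_plane_borders(uint8_t *buf, int stride, int w, int h,
                                 int ext) {
  int r;
  /* Left and right borders: repeat the first/last pixel of each row. */
  for (r = 0; r < h; ++r) {
    uint8_t *row = buf + r * stride;
    memset(row - ext, row[0], ext);
    memset(row + w, row[w - 1], ext);
  }
  /* Top and bottom borders: copy the already-extended first/last rows. */
  for (r = 1; r <= ext; ++r) {
    memcpy(buf - r * stride - ext, buf - ext, w + 2 * ext);
    memcpy(buf + (h - 1 + r) * stride - ext, buf + (h - 1) * stride - ext,
           w + 2 * ext);
  }
}
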
diff --git a/av1/encoder/firstpass.c b/av1/encoder/firstpass.c
index 13b888d20937a145c0af938022a798e3febb3f33..2c22bbec348b4396e8d6b028cc009dd6b1a55d56 100644
--- a/av1/encoder/firstpass.c
+++ b/av1/encoder/firstpass.c
@@ -25,7 +25,7 @@
 
 #include "av1/common/entropymv.h"
 #include "av1/common/quant_common.h"
-#include "av1/common/reconinter.h"  // vp10_setup_dst_planes()
+#include "av1/common/reconinter.h"  // av1_setup_dst_planes()
 #include "av1/encoder/aq_variance.h"
 #include "av1/encoder/block.h"
 #include "av1/encoder/encodeframe.h"
@@ -127,7 +127,7 @@ static void output_stats(FIRSTPASS_STATS *stats,
 }
 
 #if CONFIG_FP_MB_STATS
-static void output_fpmb_stats(uint8_t *this_frame_mb_stats, VP10_COMMON *cm,
+static void output_fpmb_stats(uint8_t *this_frame_mb_stats, AV1_COMMON *cm,
                               struct aom_codec_pkt_list *pktlist) {
   struct aom_codec_cx_pkt pkt;
   pkt.kind = VPX_CODEC_FPMB_STATS_PKT;
@@ -218,7 +218,7 @@ static void subtract_stats(FIRSTPASS_STATS *section,
 // bars and partially discounts other 0 energy areas.
 #define MIN_ACTIVE_AREA 0.5
 #define MAX_ACTIVE_AREA 1.0
-static double calculate_active_area(const VP10_COMP *cpi,
+static double calculate_active_area(const AV1_COMP *cpi,
                                     const FIRSTPASS_STATS *this_frame) {
   double active_pct;
 
@@ -232,9 +232,9 @@ static double calculate_active_area(const VP10_COMP *cpi,
 // Calculate a modified Error used in distributing bits between easier and
 // harder frames.
 #define ACT_AREA_CORRECTION 0.5
-static double calculate_modified_err(const VP10_COMP *cpi,
+static double calculate_modified_err(const AV1_COMP *cpi,
                                      const TWO_PASS *twopass,
-                                     const VP10EncoderConfig *oxcf,
+                                     const AV1EncoderConfig *oxcf,
                                      const FIRSTPASS_STATS *this_frame) {
   const FIRSTPASS_STATS *const stats = &twopass->total_stats;
   const double av_weight = stats->weight / stats->count;
@@ -258,7 +258,7 @@ static double calculate_modified_err(const VP10_COMP *cpi,
 
 // This function returns the maximum target rate per frame.
 static int frame_max_bits(const RATE_CONTROL *rc,
-                          const VP10EncoderConfig *oxcf) {
+                          const AV1EncoderConfig *oxcf) {
   int64_t max_bits = ((int64_t)rc->avg_frame_bandwidth *
                       (int64_t)oxcf->two_pass_vbrmax_section) /
                      100;
@@ -270,11 +270,11 @@ static int frame_max_bits(const RATE_CONTROL *rc,
   return (int)max_bits;
 }
 
-void vp10_init_first_pass(VP10_COMP *cpi) {
+void av1_init_first_pass(AV1_COMP *cpi) {
   zero_stats(&cpi->twopass.total_stats);
 }
 
-void vp10_end_first_pass(VP10_COMP *cpi) {
+void av1_end_first_pass(AV1_COMP *cpi) {
   output_stats(&cpi->twopass.total_stats, cpi->output_pkt_list);
 }
 
@@ -296,7 +296,7 @@ static unsigned int get_prediction_error(BLOCK_SIZE bsize,
   return sse;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static aom_variance_fn_t highbd_get_block_variance_fn(BLOCK_SIZE bsize,
                                                       int bd) {
   switch (bd) {
@@ -336,11 +336,11 @@ static unsigned int highbd_get_prediction_error(BLOCK_SIZE bsize,
   fn(src->buf, src->stride, ref->buf, ref->stride, &sse);
   return sse;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 // Refine the motion search range according to the frame dimension
 // for the first pass test.
-static int get_search_range(const VP10_COMP *cpi) {
+static int get_search_range(const AV1_COMP *cpi) {
   int sr = 0;
   const int dim = VPXMIN(cpi->initial_width, cpi->initial_height);
 
@@ -348,7 +348,7 @@ static int get_search_range(const VP10_COMP *cpi) {
   return sr;
 }
 
-static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void first_pass_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
                                      const MV *ref_mv, MV *best_mv,
                                      int *best_motion_err) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -367,18 +367,18 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
 
   // Override the default variance function to use MSE.
   v_fn_ptr.vf = get_block_variance_fn(bsize);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     v_fn_ptr.vf = highbd_get_block_variance_fn(bsize, xd->bd);
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Center the initial step/diamond search on best mv.
   tmp_err = cpi->diamond_search_sad(x, &cpi->ss_cfg, &ref_mv_full, &tmp_mv,
                                     step_param, x->sadperbit16, &num00,
                                     &v_fn_ptr, ref_mv);
   if (tmp_err < INT_MAX)
-    tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+    tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
   if (tmp_err < INT_MAX - new_mv_mode_penalty) tmp_err += new_mv_mode_penalty;
 
   if (tmp_err < *best_motion_err) {
@@ -400,7 +400,7 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
                                         step_param + n, x->sadperbit16, &num00,
                                         &v_fn_ptr, ref_mv);
       if (tmp_err < INT_MAX)
-        tmp_err = vp10_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
+        tmp_err = av1_get_mvpred_var(x, &tmp_mv, ref_mv, &v_fn_ptr, 1);
       if (tmp_err < INT_MAX - new_mv_mode_penalty)
         tmp_err += new_mv_mode_penalty;
 
@@ -412,7 +412,7 @@ static void first_pass_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   }
 }
 
-static BLOCK_SIZE get_bsize(const VP10_COMMON *cm, int mb_row, int mb_col) {
+static BLOCK_SIZE get_bsize(const AV1_COMMON *cm, int mb_row, int mb_col) {
   if (2 * mb_col + 1 < cm->mi_cols) {
     return 2 * mb_row + 1 < cm->mi_rows ? BLOCK_16X16 : BLOCK_16X8;
   } else {
@@ -424,15 +424,15 @@ static int find_fp_qindex(aom_bit_depth_t bit_depth) {
   int i;
 
   for (i = 0; i < QINDEX_RANGE; ++i)
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
+    if (av1_convert_qindex_to_q(i, bit_depth) >= FIRST_PASS_Q) break;
 
   if (i == QINDEX_RANGE) i--;
 
   return i;
 }
 
-static void set_first_pass_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void set_first_pass_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   if (!cpi->refresh_alt_ref_frame &&
       (cm->current_video_frame == 0 || (cpi->frame_flags & FRAMEFLAGS_KEY))) {
     cm->frame_type = KEY_FRAME;
@@ -445,10 +445,10 @@ static void set_first_pass_params(VP10_COMP *cpi) {
 
 #define UL_INTRA_THRESH 50
 #define INVALID_ROW -1
-void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
+void av1_first_pass(AV1_COMP *cpi, const struct lookahead_entry *source) {
   int mb_row, mb_col;
   MACROBLOCK *const x = &cpi->td.mb;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   TileInfo tile;
   struct macroblock_plane *const p = x->plane;
@@ -492,7 +492,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 
 #if CONFIG_FP_MB_STATS
   if (cpi->use_fp_mb_stats) {
-    vp10_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
+    av1_zero_array(cpi->twopass.frame_mb_stats_buf, cm->initial_mbs);
   }
 #endif
 
@@ -503,21 +503,21 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
   neutral_count = 0.0;
 
   set_first_pass_params(cpi);
-  vp10_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
+  av1_set_quantizer(cm, find_fp_qindex(cm->bit_depth));
 
-  vp10_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
+  av1_setup_block_planes(&x->e_mbd, cm->subsampling_x, cm->subsampling_y);
 
-  vp10_setup_src_planes(x, cpi->Source, 0, 0);
-  vp10_setup_dst_planes(xd->plane, new_yv12, 0, 0);
+  av1_setup_src_planes(x, cpi->Source, 0, 0);
+  av1_setup_dst_planes(xd->plane, new_yv12, 0, 0);
 
   if (!frame_is_intra_only(cm)) {
-    vp10_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
+    av1_setup_pre_planes(xd, 0, first_ref_buf, 0, 0, NULL);
   }
 
   xd->mi = cm->mi_grid_visible;
   xd->mi[0] = cm->mi;
 
-  vp10_frame_init_quantizer(cpi);
+  av1_frame_init_quantizer(cpi);
 
   for (i = 0; i < MAX_MB_PLANE; ++i) {
     p[i].coeff = ctx->coeff_pbuf[i][1];
@@ -527,11 +527,11 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
   }
   x->skip_recode = 0;
 
-  vp10_init_mv_probs(cm);
-  vp10_initialize_rd_consts(cpi);
+  av1_init_mv_probs(cm);
+  av1_initialize_rd_consts(cpi);
 
   // Tiling is ignored in the first pass.
-  vp10_tile_init(&tile, cm, 0, 0);
+  av1_tile_init(&tile, cm, 0, 0);
 
   recon_y_stride = new_yv12->y_stride;
   recon_uv_stride = new_yv12->uv_stride;
@@ -578,7 +578,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
       xd->mi[0]->mbmi.mode = DC_PRED;
       xd->mi[0]->mbmi.tx_size =
           use_dc_pred ? (bsize >= BLOCK_16X16 ? TX_16X16 : TX_8X8) : TX_4X4;
-      vp10_encode_intra_block_plane(x, bsize, 0);
+      av1_encode_intra_block_plane(x, bsize, 0);
       this_error = aom_get_mb_ss(x->plane[0].src_diff);
 
       // Keep a record of blocks that have almost no intra error residual
@@ -592,7 +592,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
         image_data_start_row = mb_row;
       }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         switch (cm->bit_depth) {
           case VPX_BITS_8: break;
@@ -605,7 +605,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
             return;
         }
       }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
       aom_clear_system_state();
       log_intra = log(this_error + 1.0);
@@ -614,7 +614,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
       else
         intra_factor += 1.0;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth)
         level_sample = CONVERT_TO_SHORTPTR(x->plane[0].src.buf)[0];
       else
@@ -659,7 +659,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
         struct buf_2d unscaled_last_source_buf_2d;
 
         xd->plane[0].pre[0].buf = first_ref_buf->y_buffer + recon_yoffset;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
           motion_error = highbd_get_prediction_error(
               bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -670,7 +670,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 #else
         motion_error =
             get_prediction_error(bsize, &x->plane[0].src, &xd->plane[0].pre[0]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         // Compute the motion error of the 0,0 motion using the last source
         // frame as the reference. Skip the further motion search on
@@ -679,7 +679,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
             cpi->unscaled_last_source->y_buffer + recon_yoffset;
         unscaled_last_source_buf_2d.stride =
             cpi->unscaled_last_source->y_stride;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
           raw_motion_error = highbd_get_prediction_error(
               bsize, &x->plane[0].src, &unscaled_last_source_buf_2d, xd->bd);
@@ -690,7 +690,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 #else
         raw_motion_error = get_prediction_error(bsize, &x->plane[0].src,
                                                 &unscaled_last_source_buf_2d);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
         // TODO(pengchong): Replace the hard-coded threshold
         if (raw_motion_error > 25) {
@@ -716,7 +716,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
             int gf_motion_error;
 
             xd->plane[0].pre[0].buf = gld_yv12->y_buffer + recon_yoffset;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
             if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
               gf_motion_error = highbd_get_prediction_error(
                   bsize, &x->plane[0].src, &xd->plane[0].pre[0], xd->bd);
@@ -727,7 +727,7 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
 #else
             gf_motion_error = get_prediction_error(bsize, &x->plane[0].src,
                                                    &xd->plane[0].pre[0]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
             first_pass_motion_search(cpi, x, &zero_mv, &tmp_mv,
                                      &gf_motion_error);
@@ -798,8 +798,8 @@ void vp10_first_pass(VP10_COMP *cpi, const struct lookahead_entry *source) {
           xd->mi[0]->mbmi.tx_size = TX_4X4;
           xd->mi[0]->mbmi.ref_frame[0] = LAST_FRAME;
           xd->mi[0]->mbmi.ref_frame[1] = NONE;
-          vp10_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
-          vp10_encode_sby_pass1(x, bsize);
+          av1_build_inter_predictors_sby(xd, mb_row << 1, mb_col << 1, bsize);
+          av1_encode_sby_pass1(x, bsize);
           sum_mvr += mv.row;
           sum_mvr_abs += abs(mv.row);
           sum_mvc += mv.col;
@@ -1047,7 +1047,7 @@ static double calc_correction_factor(double err_per_mb, double err_divisor,
 
   // Adjustment to the power term based on the actual quantizer.
   const double power_term =
-      VPXMIN(vp10_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
+      VPXMIN(av1_convert_qindex_to_q(q, bit_depth) * 0.01 + pt_low, pt_high);
 
   // Calculate correction factor.
   if (power_term < 1.0) assert(error_term >= 0.0);
@@ -1060,13 +1060,13 @@ static double calc_correction_factor(double err_per_mb, double err_divisor,
 // increased size and hence coding cost of motion vectors.
 #define EDIV_SIZE_FACTOR 800
 
-static int get_twopass_worst_quality(const VP10_COMP *cpi,
+static int get_twopass_worst_quality(const AV1_COMP *cpi,
                                      const double section_err,
                                      double inactive_zone,
                                      int section_target_bandwidth,
                                      double group_weight_factor) {
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 
   inactive_zone = fclamp(inactive_zone, 0.0, 1.0);
 
@@ -1091,7 +1091,7 @@ static int get_twopass_worst_quality(const VP10_COMP *cpi,
       const double factor = calc_correction_factor(
           av_err_per_mb, ERR_DIVISOR - ediv_size_correction, FACTOR_PT_LOW,
           FACTOR_PT_HIGH, q, cpi->common.bit_depth);
-      const int bits_per_mb = vp10_rc_bits_per_mb(
+      const int bits_per_mb = av1_rc_bits_per_mb(
           INTER_FRAME, q, factor * speed_term * group_weight_factor,
           cpi->common.bit_depth);
       if (bits_per_mb <= target_norm_bits_per_mb) break;
@@ -1103,17 +1103,17 @@ static int get_twopass_worst_quality(const VP10_COMP *cpi,
   }
 }
 
-static void setup_rf_level_maxq(VP10_COMP *cpi) {
+static void setup_rf_level_maxq(AV1_COMP *cpi) {
   int i;
   RATE_CONTROL *const rc = &cpi->rc;
   for (i = INTER_NORMAL; i < RATE_FACTOR_LEVELS; ++i) {
-    int qdelta = vp10_frame_type_qdelta(cpi, i, rc->worst_quality);
+    int qdelta = av1_frame_type_qdelta(cpi, i, rc->worst_quality);
     rc->rf_level_maxq[i] = VPXMAX(rc->worst_quality + qdelta, rc->best_quality);
   }
 }
 
-void vp10_init_subsampling(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_init_subsampling(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   const int w = cm->width;
   const int h = cm->height;
@@ -1128,15 +1128,15 @@ void vp10_init_subsampling(VP10_COMP *cpi) {
   setup_rf_level_maxq(cpi);
 }
 
-void vp10_calculate_coded_size(VP10_COMP *cpi, int *scaled_frame_width,
+void av1_calculate_coded_size(AV1_COMP *cpi, int *scaled_frame_width,
                                int *scaled_frame_height) {
   RATE_CONTROL *const rc = &cpi->rc;
   *scaled_frame_width = rc->frame_width[rc->frame_size_selector];
   *scaled_frame_height = rc->frame_height[rc->frame_size_selector];
 }
 
-void vp10_init_second_pass(VP10_COMP *cpi) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_init_second_pass(AV1_COMP *cpi) {
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   TWO_PASS *const twopass = &cpi->twopass;
   double frame_rate;
   FIRSTPASS_STATS *stats;
@@ -1157,7 +1157,7 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
   // encoded in the second pass is a guess. However, the sum duration is not.
   // It is calculated based on the actual durations of all frames from the
   // first pass.
-  vp10_new_framerate(cpi, frame_rate);
+  av1_new_framerate(cpi, frame_rate);
   twopass->bits_left =
       (int64_t)(stats->duration * oxcf->target_bandwidth / 10000000.0);
 
@@ -1193,7 +1193,7 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
   twopass->last_kfgroup_zeromotion_pct = 100;
 
   if (oxcf->resize_mode != RESIZE_NONE) {
-    vp10_init_subsampling(cpi);
+    av1_init_subsampling(cpi);
   }
 }
 
@@ -1204,7 +1204,7 @@ void vp10_init_second_pass(VP10_COMP *cpi) {
 #define LOW_SR_DIFF_TRHESH 0.1
 #define SR_DIFF_MAX 128.0
 
-static double get_sr_decay_rate(const VP10_COMP *cpi,
+static double get_sr_decay_rate(const AV1_COMP *cpi,
                                 const FIRSTPASS_STATS *frame) {
   const int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
                                                              : cpi->common.MBs;
@@ -1233,7 +1233,7 @@ static double get_sr_decay_rate(const VP10_COMP *cpi,
 
 // This function gives an estimate of how badly we believe the prediction
 // quality is decaying from frame to frame.
-static double get_zero_motion_factor(const VP10_COMP *cpi,
+static double get_zero_motion_factor(const AV1_COMP *cpi,
                                      const FIRSTPASS_STATS *frame) {
   const double zero_motion_pct = frame->pcnt_inter - frame->pcnt_motion;
   double sr_decay = get_sr_decay_rate(cpi, frame);
@@ -1242,7 +1242,7 @@ static double get_zero_motion_factor(const VP10_COMP *cpi,
 
 #define ZM_POWER_FACTOR 0.75
 
-static double get_prediction_decay_rate(const VP10_COMP *cpi,
+static double get_prediction_decay_rate(const AV1_COMP *cpi,
                                         const FIRSTPASS_STATS *next_frame) {
   const double sr_decay_rate = get_sr_decay_rate(cpi, next_frame);
   const double zero_motion_factor =
@@ -1256,7 +1256,7 @@ static double get_prediction_decay_rate(const VP10_COMP *cpi,
 // Function to test for a condition where a complex transition is followed
 // by a static section. For example in slide shows where there is a fade
 // between slides. This is to help with more optimal kf and gf positioning.
-static int detect_transition_to_still(VP10_COMP *cpi, int frame_interval,
+static int detect_transition_to_still(AV1_COMP *cpi, int frame_interval,
                                       int still_interval,
                                       double loop_decay_rate,
                                       double last_decay_rate) {
@@ -1330,11 +1330,11 @@ static void accumulate_frame_motion_stats(const FIRSTPASS_STATS *stats,
 }
 
 #define BASELINE_ERR_PER_MB 1000.0
-static double calc_frame_boost(VP10_COMP *cpi,
+static double calc_frame_boost(AV1_COMP *cpi,
                                const FIRSTPASS_STATS *this_frame,
                                double this_frame_mv_in_out, double max_boost) {
   double frame_boost;
-  const double lq = vp10_convert_qindex_to_q(
+  const double lq = av1_convert_qindex_to_q(
       cpi->rc.avg_frame_qindex[INTER_FRAME], cpi->common.bit_depth);
   const double boost_q_correction = VPXMIN((0.5 + (lq * 0.015)), 1.5);
   int num_mbs = (cpi->oxcf.resize_mode != RESIZE_NONE) ? cpi->initial_mbs
@@ -1360,7 +1360,7 @@ static double calc_frame_boost(VP10_COMP *cpi,
   return VPXMIN(frame_boost, max_boost * boost_q_correction);
 }
 
-static int calc_arf_boost(VP10_COMP *cpi, int offset, int f_frames,
+static int calc_arf_boost(AV1_COMP *cpi, int offset, int f_frames,
                           int b_frames, int *f_boost, int *b_boost) {
   TWO_PASS *const twopass = &cpi->twopass;
   int i;
@@ -1468,7 +1468,7 @@ static int calculate_section_intra_ratio(const FIRSTPASS_STATS *begin,
 }
 
 // Calculate the total bits to allocate in this GF/ARF group.
-static int64_t calculate_total_gf_group_bits(VP10_COMP *cpi,
+static int64_t calculate_total_gf_group_bits(AV1_COMP *cpi,
                                              double gf_group_err) {
   const RATE_CONTROL *const rc = &cpi->rc;
   const TWO_PASS *const twopass = &cpi->twopass;
@@ -1530,10 +1530,10 @@ static void get_arf_buffer_indices(unsigned char *arf_buffer_indices) {
   arf_buffer_indices[1] = ARF_SLOT2;
 }
 
-static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
+static void allocate_gf_group_bits(AV1_COMP *cpi, int64_t gf_group_bits,
                                    double group_error, int gf_arf_bits) {
   RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   TWO_PASS *const twopass = &cpi->twopass;
   GF_GROUP *const gf_group = &twopass->gf_group;
   FIRSTPASS_STATS frame_stats;
@@ -1645,7 +1645,7 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
   // Note:
   // We need to configure the frame at the end of the sequence + 1 that will be
   // the start frame for the next group. Otherwise prior to the call to
-  // vp10_rc_get_second_pass_params() the data will be undefined.
+  // av1_rc_get_second_pass_params() the data will be undefined.
   gf_group->arf_update_idx[frame_index] = arf_buffer_indices[0];
   gf_group->arf_ref_idx[frame_index] = arf_buffer_indices[0];
 
@@ -1670,10 +1670,10 @@ static void allocate_gf_group_bits(VP10_COMP *cpi, int64_t gf_group_bits,
 }
 
 // Analyse and define a gf/arf group.
-static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+static void define_gf_group(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
-  VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  AV1EncoderConfig *const oxcf = &cpi->oxcf;
   TWO_PASS *const twopass = &cpi->twopass;
   FIRSTPASS_STATS next_frame;
   const FIRSTPASS_STATS *const start_pos = twopass->stats_in;
@@ -1717,11 +1717,11 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   // Reset the GF group data structures unless this is a key
   // frame in which case it will already have been done.
   if (is_key_frame == 0) {
-    vp10_zero(twopass->gf_group);
+    av1_zero(twopass->gf_group);
   }
 
   aom_clear_system_state();
-  vp10_zero(next_frame);
+  av1_zero(next_frame);
 
   // Load stats for the current frame.
   mod_frame_err = calculate_modified_err(cpi, twopass, oxcf, this_frame);
@@ -1748,9 +1748,9 @@ static void define_gf_group(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   // Set a maximum and minimum interval for the GF group.
   // If the image appears almost completely static we can extend beyond this.
   {
-    int int_max_q = (int)(vp10_convert_qindex_to_q(
+    int int_max_q = (int)(av1_convert_qindex_to_q(
         twopass->active_worst_quality, cpi->common.bit_depth));
-    int int_lbq = (int)(vp10_convert_qindex_to_q(rc->last_boosted_qindex,
+    int int_lbq = (int)(av1_convert_qindex_to_q(rc->last_boosted_qindex,
                                                  cpi->common.bit_depth));
     active_min_gf_interval = rc->min_gf_interval + VPXMIN(2, int_max_q / 200);
     if (active_min_gf_interval > rc->max_gf_interval)
@@ -2061,12 +2061,12 @@ static int test_candidate_kf(TWO_PASS *twopass,
   return is_viable_kf;
 }
 
-static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
+static void find_next_key_frame(AV1_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   int i, j;
   RATE_CONTROL *const rc = &cpi->rc;
   TWO_PASS *const twopass = &cpi->twopass;
   GF_GROUP *const gf_group = &twopass->gf_group;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const FIRSTPASS_STATS first_frame = *this_frame;
   const FIRSTPASS_STATS *const start_position = twopass->stats_in;
   FIRSTPASS_STATS next_frame;
@@ -2081,12 +2081,12 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
   double kf_group_err = 0.0;
   double recent_loop_decay[8] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
 
-  vp10_zero(next_frame);
+  av1_zero(next_frame);
 
   cpi->common.frame_type = KEY_FRAME;
 
   // Reset the GF group data structures.
-  vp10_zero(*gf_group);
+  av1_zero(*gf_group);
 
   // Is this a key frame forced by the key frame interval?
   rc->this_key_frame_forced = rc->next_key_frame_forced;
@@ -2293,7 +2293,7 @@ static void find_next_key_frame(VP10_COMP *cpi, FIRSTPASS_STATS *this_frame) {
 }
 
 // Define the reference buffers that will be updated post encode.
-static void configure_buffer_updates(VP10_COMP *cpi) {
+static void configure_buffer_updates(AV1_COMP *cpi) {
   TWO_PASS *const twopass = &cpi->twopass;
 
   cpi->rc.is_src_frame_alt_ref = 0;
@@ -2328,7 +2328,7 @@ static void configure_buffer_updates(VP10_COMP *cpi) {
   }
 }
 
-static int is_skippable_frame(const VP10_COMP *cpi) {
+static int is_skippable_frame(const AV1_COMP *cpi) {
   // If no non-zero motion vector was detected for the current frame in the
   // first pass, nor for its previous and forward frames, then this frame
   // can be skipped for partition check, and the partition size is assigned
@@ -2347,8 +2347,8 @@ static int is_skippable_frame(const VP10_COMP *cpi) {
           twopass->stats_in->pcnt_inter - twopass->stats_in->pcnt_motion == 1);
 }
 
-void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_second_pass_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   TWO_PASS *const twopass = &cpi->twopass;
   GF_GROUP *const gf_group = &twopass->gf_group;
@@ -2367,7 +2367,7 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
     int target_rate;
     configure_buffer_updates(cpi);
     target_rate = gf_group->bit_allocation[gf_group->index];
-    target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+    target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
     rc->base_frame_target = target_rate;
 
     cm->frame_type = INTER_FRAME;
@@ -2405,12 +2405,12 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
     twopass->baseline_active_worst_quality = tmp_q;
     rc->ni_av_qi = tmp_q;
     rc->last_q[INTER_FRAME] = tmp_q;
-    rc->avg_q = vp10_convert_qindex_to_q(tmp_q, cm->bit_depth);
+    rc->avg_q = av1_convert_qindex_to_q(tmp_q, cm->bit_depth);
     rc->avg_frame_qindex[INTER_FRAME] = tmp_q;
     rc->last_q[KEY_FRAME] = (tmp_q + cpi->oxcf.best_allowed_q) / 2;
     rc->avg_frame_qindex[KEY_FRAME] = rc->last_q[KEY_FRAME];
   }
-  vp10_zero(this_frame);
+  av1_zero(this_frame);
   if (EOF == input_stats(twopass, &this_frame)) return;
 
   // Set the frame content type flag.
@@ -2460,9 +2460,9 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
 
   target_rate = gf_group->bit_allocation[gf_group->index];
   if (cpi->common.frame_type == KEY_FRAME)
-    target_rate = vp10_rc_clamp_iframe_target_size(cpi, target_rate);
+    target_rate = av1_rc_clamp_iframe_target_size(cpi, target_rate);
   else
-    target_rate = vp10_rc_clamp_pframe_target_size(cpi, target_rate);
+    target_rate = av1_rc_clamp_pframe_target_size(cpi, target_rate);
 
   rc->base_frame_target = target_rate;
 
@@ -2483,7 +2483,7 @@ void vp10_rc_get_second_pass_params(VP10_COMP *cpi) {
 #define MINQ_ADJ_LIMIT 48
 #define MINQ_ADJ_LIMIT_CQ 20
 #define HIGH_UNDERSHOOT_RATIO 2
-void vp10_twopass_postencode_update(VP10_COMP *cpi) {
+void av1_twopass_postencode_update(AV1_COMP *cpi) {
   TWO_PASS *const twopass = &cpi->twopass;
   RATE_CONTROL *const rc = &cpi->rc;
   const int bits_used = rc->base_frame_target;
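
For context on the get_twopass_worst_quality() hunk earlier in this file: it walks q upward from the best allowed quality and stops at the first q whose estimated bits per macroblock fit the per-frame budget. A simplified sketch of that search follows; rate_per_mb_at_q is a hypothetical stand-in for av1_rc_bits_per_mb() combined with the correction factor, assumed non-increasing in q.

/* Sketch, not the library code: return the lowest q in [best_q, worst_q]
 * whose estimated per-MB rate fits the target, or worst_q if none does. */
static int search_worst_quality(int best_q, int worst_q,
                                int target_norm_bits_per_mb,
                                int (*rate_per_mb_at_q)(int q)) {
  int q;
  for (q = best_q; q < worst_q; ++q) {
    if (rate_per_mb_at_q(q) <= target_norm_bits_per_mb) break;
  }
  return q;
}
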
diff --git a/av1/encoder/firstpass.h b/av1/encoder/firstpass.h
index 89ce3fbf6239714f11cefecd1f72940c8c34e631..f5fe3294abae2d872ab30eb46f09438b336bf1b1 100644
--- a/av1/encoder/firstpass.h
+++ b/av1/encoder/firstpass.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_FIRSTPASS_H_
-#define VP10_ENCODER_FIRSTPASS_H_
+#ifndef AV1_ENCODER_FIRSTPASS_H_
+#define AV1_ENCODER_FIRSTPASS_H_
 
 #include "av1/encoder/lookahead.h"
 #include "av1/encoder/ratectrl.h"
@@ -139,28 +139,28 @@ typedef struct {
   GF_GROUP gf_group;
 } TWO_PASS;
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_init_first_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_first_pass_params(struct VP10_COMP *cpi);
-void vp10_first_pass(struct VP10_COMP *cpi,
+void av1_init_first_pass(struct AV1_COMP *cpi);
+void av1_rc_get_first_pass_params(struct AV1_COMP *cpi);
+void av1_first_pass(struct AV1_COMP *cpi,
                      const struct lookahead_entry *source);
-void vp10_end_first_pass(struct VP10_COMP *cpi);
+void av1_end_first_pass(struct AV1_COMP *cpi);
 
-void vp10_init_second_pass(struct VP10_COMP *cpi);
-void vp10_rc_get_second_pass_params(struct VP10_COMP *cpi);
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_init_second_pass(struct AV1_COMP *cpi);
+void av1_rc_get_second_pass_params(struct AV1_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
 
 // Post encode update of the rate control parameters for 2-pass
-void vp10_twopass_postencode_update(struct VP10_COMP *cpi);
+void av1_twopass_postencode_update(struct AV1_COMP *cpi);
 
-void vp10_init_subsampling(struct VP10_COMP *cpi);
+void av1_init_subsampling(struct AV1_COMP *cpi);
 
-void vp10_calculate_coded_size(struct VP10_COMP *cpi, int *scaled_frame_width,
+void av1_calculate_coded_size(struct AV1_COMP *cpi, int *scaled_frame_width,
                                int *scaled_frame_height);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_FIRSTPASS_H_
+#endif  // AV1_ENCODER_FIRSTPASS_H_
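
The declarations above are the whole two-pass entry surface of firstpass.h. One possible sequencing by an encoder driver is sketched below; fetch_frame(), frames_remaining() and encode_frame() are illustrative stubs, not functions from this tree, and the real encoder interleaves these calls inside its own frame loop.

#include <stddef.h>
#include "av1/encoder/firstpass.h"

/* Illustrative stubs only; a real driver pulls frames from the lookahead
 * queue and runs the actual frame encode here. */
static const struct lookahead_entry *fetch_frame(struct AV1_COMP *cpi) {
  (void)cpi;
  return NULL;
}
static int frames_remaining(struct AV1_COMP *cpi) { (void)cpi; return 0; }
static void encode_frame(struct AV1_COMP *cpi) { (void)cpi; }

static void run_two_pass(struct AV1_COMP *cpi) {
  const struct lookahead_entry *src;

  /* Pass 1: accumulate FIRSTPASS_STATS for every source frame. */
  av1_init_first_pass(cpi);
  while ((src = fetch_frame(cpi)) != NULL) av1_first_pass(cpi, src);
  av1_end_first_pass(cpi);

  /* Pass 2: the accumulated stats drive rate control around each encode. */
  av1_init_second_pass(cpi);
  while (frames_remaining(cpi)) {
    av1_rc_get_second_pass_params(cpi); /* before encoding the frame */
    encode_frame(cpi);
    av1_twopass_postencode_update(cpi); /* after encoding the frame */
  }
}
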
diff --git a/av1/encoder/lookahead.c b/av1/encoder/lookahead.c
index 8206a5e07f35a5ee3434741032501fe1ca65c047..d245466afe7fac70ad1fd2217a25d544b7cdee26 100644
--- a/av1/encoder/lookahead.c
+++ b/av1/encoder/lookahead.c
@@ -31,7 +31,7 @@ static struct lookahead_entry *pop(struct lookahead_ctx *ctx,
   return buf;
 }
 
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
+void av1_lookahead_destroy(struct lookahead_ctx *ctx) {
   if (ctx) {
     if (ctx->buf) {
       unsigned int i;
@@ -43,11 +43,11 @@ void vp10_lookahead_destroy(struct lookahead_ctx *ctx) {
   }
 }
 
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
                                           unsigned int height,
                                           unsigned int subsampling_x,
                                           unsigned int subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                           int use_highbitdepth,
 #endif
                                           unsigned int depth) {
@@ -70,7 +70,7 @@ struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
     for (i = 0; i < depth; i++)
       if (aom_alloc_frame_buffer(
               &ctx->buf[i].img, width, height, subsampling_x, subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
               use_highbitdepth,
 #endif
               VPX_ENC_BORDER_IN_PIXELS, legacy_byte_alignment))
@@ -78,15 +78,15 @@ struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
   }
   return ctx;
 bail:
-  vp10_lookahead_destroy(ctx);
+  av1_lookahead_destroy(ctx);
   return NULL;
 }
 
 #define USE_PARTIAL_COPY 0
 
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                         int64_t ts_start, int64_t ts_end,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                         int use_highbitdepth,
 #endif
                         unsigned int flags) {
@@ -119,7 +119,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
 
 #if USE_PARTIAL_COPY
   // TODO(jkoleszar): This is disabled for now, as
-  // vp10_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
+  // av1_copy_and_extend_frame_with_rect is not subsampling/alpha aware.
 
   // Only do this partial copy if the following conditions are all met:
   // 1. Lookahead queue has a size of 1.
@@ -146,7 +146,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
         }
 
         // Only copy this active region.
-        vp10_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
+        av1_copy_and_extend_frame_with_rect(src, &buf->img, row << 4, col << 4,
                                              16, (active_end - col) << 4);
 
         // Start again from the end of this active region.
@@ -162,7 +162,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
       memset(&new_img, 0, sizeof(new_img));
       if (aom_alloc_frame_buffer(&new_img, width, height, subsampling_x,
                                  subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                  use_highbitdepth,
 #endif
                                  VPX_ENC_BORDER_IN_PIXELS, 0))
@@ -178,7 +178,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
       buf->img.subsampling_y = src->subsampling_y;
     }
     // Partial copy not implemented yet
-    vp10_copy_and_extend_frame(src, &buf->img);
+    av1_copy_and_extend_frame(src, &buf->img);
 #if USE_PARTIAL_COPY
   }
 #endif
@@ -189,7 +189,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
   return 0;
 }
 
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx,
                                            int drain) {
   struct lookahead_entry *buf = NULL;
 
@@ -200,7 +200,7 @@ struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
   return buf;
 }
 
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
                                             int index) {
   struct lookahead_entry *buf = NULL;
 
@@ -223,4 +223,4 @@ struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
   return buf;
 }
 
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx) { return ctx->sz; }
diff --git a/av1/encoder/lookahead.h b/av1/encoder/lookahead.h
index 148809aeb6bdc0fcd4b8a7008653ae3f55d3b3c2..b0e33ee652e95fc90245a26355ac6248b9bac2a9 100644
--- a/av1/encoder/lookahead.h
+++ b/av1/encoder/lookahead.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_LOOKAHEAD_H_
-#define VP10_ENCODER_LOOKAHEAD_H_
+#ifndef AV1_ENCODER_LOOKAHEAD_H_
+#define AV1_ENCODER_LOOKAHEAD_H_
 
 #include "aom_scale/yv12config.h"
 #include "aom/aom_integer.h"
@@ -44,18 +44,18 @@ struct lookahead_ctx {
  * The lookahead stage is a queue of frame buffers on which some analysis
  * may be done when buffers are enqueued.
  */
-struct lookahead_ctx *vp10_lookahead_init(unsigned int width,
+struct lookahead_ctx *av1_lookahead_init(unsigned int width,
                                           unsigned int height,
                                           unsigned int subsampling_x,
                                           unsigned int subsampling_y,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                                           int use_highbitdepth,
 #endif
                                           unsigned int depth);
 
 /**\brief Destroys the lookahead stage
  */
-void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
+void av1_lookahead_destroy(struct lookahead_ctx *ctx);
 
 /**\brief Enqueue a source buffer
  *
@@ -72,9 +72,9 @@ void vp10_lookahead_destroy(struct lookahead_ctx *ctx);
  * \param[in] flags       Flags set on this frame
  * \param[in] active_map  Map that specifies which macroblock is active
  */
-int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
+int av1_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
                         int64_t ts_start, int64_t ts_end,
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
                         int use_highbitdepth,
 #endif
                         unsigned int flags);
@@ -89,7 +89,7 @@ int vp10_lookahead_push(struct lookahead_ctx *ctx, YV12_BUFFER_CONFIG *src,
  * \retval NULL, if drain set and queue is empty
  * \retval NULL, if drain not set and queue not of the configured depth
  */
-struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_pop(struct lookahead_ctx *ctx,
                                            int drain);
 
 /**\brief Get a future source buffer to encode
@@ -99,17 +99,17 @@ struct lookahead_entry *vp10_lookahead_pop(struct lookahead_ctx *ctx,
  *
  * \retval NULL, if no buffer exists at the specified index
  */
-struct lookahead_entry *vp10_lookahead_peek(struct lookahead_ctx *ctx,
+struct lookahead_entry *av1_lookahead_peek(struct lookahead_ctx *ctx,
                                             int index);
 
 /**\brief Get the number of frames currently in the lookahead queue
  *
  * \param[in] ctx       Pointer to the lookahead context
  */
-unsigned int vp10_lookahead_depth(struct lookahead_ctx *ctx);
+unsigned int av1_lookahead_depth(struct lookahead_ctx *ctx);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_LOOKAHEAD_H_
+#endif  // AV1_ENCODER_LOOKAHEAD_H_
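
A small usage sketch for the queue API declared above, assuming a build without CONFIG_AOM_HIGHBITDEPTH (with it, av1_lookahead_init() and av1_lookahead_push() take an extra use_highbitdepth argument). The 1280x720 4:2:0 parameters, the queue depth, and the caller-provided raw buffer are illustrative only.

#include "av1/encoder/lookahead.h"

/* Sketch: push one source frame through the lookahead queue and pop it back
 * out for encoding. Error handling is intentionally minimal. */
static int queue_one_frame(YV12_BUFFER_CONFIG *raw, int64_t pts) {
  struct lookahead_entry *entry;
  struct lookahead_ctx *la;
  int ok;

  la = av1_lookahead_init(1280, 720, /*subsampling_x=*/1,
                          /*subsampling_y=*/1, /*depth=*/25);
  if (la == NULL) return -1;

  /* Enqueue: the queue copies (and border-extends) the source frame. */
  if (av1_lookahead_push(la, raw, pts, pts + 1, /*flags=*/0)) {
    av1_lookahead_destroy(la);
    return -1;
  }

  /* Peek at the next frame to encode without removing it ... */
  entry = av1_lookahead_peek(la, 0);
  (void)entry;

  /* ... then dequeue it; drain forces a pop even though the queue has not
   * reached its configured depth. */
  entry = av1_lookahead_pop(la, /*drain=*/1);
  ok = (entry != NULL);

  (void)av1_lookahead_depth(la); /* back to 0 after the pop */
  av1_lookahead_destroy(la);
  return ok ? 0 : -1;
}
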
diff --git a/av1/encoder/mbgraph.c b/av1/encoder/mbgraph.c
index 80d2aefd48efb5d096c34dd4ed03963396514ee8..2196a0ebeabcbaeb29238f5ee517deadcea59b5b 100644
--- a/av1/encoder/mbgraph.c
+++ b/av1/encoder/mbgraph.c
@@ -23,7 +23,7 @@
 #include "av1/common/reconinter.h"
 #include "av1/common/reconintra.h"
 
-static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
+static unsigned int do_16x16_motion_iteration(AV1_COMP *cpi, const MV *ref_mv,
                                               MV *dst_mv, int mb_row,
                                               int mb_col) {
   MACROBLOCK *const x = &cpi->td.mb;
@@ -42,13 +42,13 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
   int step_param = mv_sf->reduce_first_step_size;
   step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
 
-  vp10_set_mv_search_range(x, ref_mv);
+  av1_set_mv_search_range(x, ref_mv);
 
   ref_full.col = ref_mv->col >> 3;
   ref_full.row = ref_mv->row >> 3;
 
   /*cpi->sf.search_method == HEX*/
-  vp10_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
+  av1_hex_search(x, &ref_full, step_param, x->errorperbit, 0,
                   cond_cost_list(cpi, cost_list), &v_fn_ptr, 0, ref_mv, dst_mv);
 
   // Try sub-pixel MC
@@ -66,7 +66,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
   xd->mi[0]->mbmi.mode = NEWMV;
   xd->mi[0]->mbmi.mv[0].as_mv = *dst_mv;
 
-  vp10_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
+  av1_build_inter_predictors_sby(xd, mb_row, mb_col, BLOCK_16X16);
 
   /* restore UMV window */
   x->mv_col_min = tmp_col_min;
@@ -78,7 +78,7 @@ static unsigned int do_16x16_motion_iteration(VP10_COMP *cpi, const MV *ref_mv,
                       xd->plane[0].dst.buf, xd->plane[0].dst.stride);
 }
 
-static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv,
+static int do_16x16_motion_search(AV1_COMP *cpi, const MV *ref_mv,
                                   int_mv *dst_mv, int mb_row, int mb_col) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -116,7 +116,7 @@ static int do_16x16_motion_search(VP10_COMP *cpi, const MV *ref_mv,
   return err;
 }
 
-static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
+static int do_16x16_zerozero_search(AV1_COMP *cpi, int_mv *dst_mv) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   unsigned int err;
@@ -130,7 +130,7 @@ static int do_16x16_zerozero_search(VP10_COMP *cpi, int_mv *dst_mv) {
 
   return err;
 }
-static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
+static int find_best_16x16_intra(AV1_COMP *cpi, PREDICTION_MODE *pbest_mode) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   PREDICTION_MODE best_mode = -1, mode;
@@ -142,7 +142,7 @@ static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
     unsigned int err;
 
     xd->mi[0]->mbmi.mode = mode;
-    vp10_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
+    av1_predict_intra_block(xd, 2, 2, TX_16X16, mode, x->plane[0].src.buf,
                              x->plane[0].src.stride, xd->plane[0].dst.buf,
                              xd->plane[0].dst.stride, 0, 0, 0);
     err = aom_sad16x16(x->plane[0].src.buf, x->plane[0].src.stride,
@@ -160,7 +160,7 @@ static int find_best_16x16_intra(VP10_COMP *cpi, PREDICTION_MODE *pbest_mode) {
   return best_err;
 }
 
-static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
+static void update_mbgraph_mb_stats(AV1_COMP *cpi, MBGRAPH_MB_STATS *stats,
                                     YV12_BUFFER_CONFIG *buf, int mb_y_offset,
                                     YV12_BUFFER_CONFIG *golden_ref,
                                     const MV *prev_golden_ref_mv,
@@ -169,7 +169,7 @@ static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   int intra_error;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
 
   // FIXME in practice we're completely ignoring chroma here
   x->plane[0].src.buf = buf->y_buffer + mb_y_offset;
@@ -213,21 +213,21 @@ static void update_mbgraph_mb_stats(VP10_COMP *cpi, MBGRAPH_MB_STATS *stats,
   }
 }
 
-static void update_mbgraph_frame_stats(VP10_COMP *cpi,
+static void update_mbgraph_frame_stats(AV1_COMP *cpi,
                                        MBGRAPH_FRAME_STATS *stats,
                                        YV12_BUFFER_CONFIG *buf,
                                        YV12_BUFFER_CONFIG *golden_ref,
                                        YV12_BUFFER_CONFIG *alt_ref) {
   MACROBLOCK *const x = &cpi->td.mb;
   MACROBLOCKD *const xd = &x->e_mbd;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   int mb_col, mb_row, offset = 0;
   int mb_y_offset = 0, arf_y_offset = 0, gld_y_offset = 0;
   MV gld_top_mv = { 0, 0 };
   MODE_INFO mi_local;
 
-  vp10_zero(mi_local);
+  av1_zero(mi_local);
   // Set up limit values for motion vectors to prevent them extending outside
   // the UMV borders.
   x->mv_row_min = -BORDER_MV_PIXELS_B16;
@@ -280,8 +280,8 @@ static void update_mbgraph_frame_stats(VP10_COMP *cpi,
 }
 
 // void separate_arf_mbs_byzz
-static void separate_arf_mbs(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+static void separate_arf_mbs(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   int mb_col, mb_row, offset, i;
   int mi_row, mi_col;
   int ncnt[4] = { 0 };
@@ -347,19 +347,19 @@ static void separate_arf_mbs(VP10_COMP *cpi) {
     else
       cpi->static_mb_pct = 0;
 
-    vp10_enable_segmentation(&cm->seg);
+    av1_enable_segmentation(&cm->seg);
   } else {
     cpi->static_mb_pct = 0;
-    vp10_disable_segmentation(&cm->seg);
+    av1_disable_segmentation(&cm->seg);
   }
 
   // Free locally allocated storage
   aom_free(arf_not_zz);
 }
 
-void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
-  int i, n_frames = vp10_lookahead_depth(cpi->lookahead);
+void av1_update_mbgraph_stats(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
+  int i, n_frames = av1_lookahead_depth(cpi->lookahead);
   YV12_BUFFER_CONFIG *golden_ref = get_ref_frame_buffer(cpi, GOLDEN_FRAME);
 
   assert(golden_ref != NULL);
@@ -383,7 +383,7 @@ void vp10_update_mbgraph_stats(VP10_COMP *cpi) {
   // the ARF MC search backwards, to get optimal results for MV caching
   for (i = 0; i < n_frames; i++) {
     MBGRAPH_FRAME_STATS *frame_stats = &cpi->mbgraph_stats[i];
-    struct lookahead_entry *q_cur = vp10_lookahead_peek(cpi->lookahead, i);
+    struct lookahead_entry *q_cur = av1_lookahead_peek(cpi->lookahead, i);
 
     assert(q_cur != NULL);
 
diff --git a/av1/encoder/mbgraph.h b/av1/encoder/mbgraph.h
index 8c034da03aba09acafc48a5c56799c1c892b8e0b..db005e1a58917a54db64a2c03fbee7f4a4798f24 100644
--- a/av1/encoder/mbgraph.h
+++ b/av1/encoder/mbgraph.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_MBGRAPH_H_
-#define VP10_ENCODER_MBGRAPH_H_
+#ifndef AV1_ENCODER_MBGRAPH_H_
+#define AV1_ENCODER_MBGRAPH_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -28,12 +28,12 @@ typedef struct {
 
 typedef struct { MBGRAPH_MB_STATS *mb_stats; } MBGRAPH_FRAME_STATS;
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_update_mbgraph_stats(struct VP10_COMP *cpi);
+void av1_update_mbgraph_stats(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_MBGRAPH_H_
+#endif  // AV1_ENCODER_MBGRAPH_H_
diff --git a/av1/encoder/mcomp.c b/av1/encoder/mcomp.c
index a82d1519db491b5a01fa184afd8fa8eea82ff90b..b3690e8b3ad993b63045837c9466abe4764440a1 100644
--- a/av1/encoder/mcomp.c
+++ b/av1/encoder/mcomp.c
@@ -33,7 +33,7 @@ static INLINE const uint8_t *get_buf_from_mv(const struct buf_2d *buf,
   return &buf->buf[mv->row * buf->stride + mv->col];
 }
 
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
   int col_min = (mv->col >> 3) - MAX_FULL_PEL_VAL + (mv->col & 7 ? 1 : 0);
   int row_min = (mv->row >> 3) - MAX_FULL_PEL_VAL + (mv->row & 7 ? 1 : 0);
   int col_max = (mv->col >> 3) + MAX_FULL_PEL_VAL;
@@ -52,7 +52,7 @@ void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv) {
   if (x->mv_row_max > row_max) x->mv_row_max = row_max;
 }
 
-int vp10_init_search_range(int size) {
+int av1_init_search_range(int size) {
   int sr = 0;
   // Minimum search size no matter what the passed in value.
   size = VPXMAX(16, size);
@@ -65,11 +65,11 @@ int vp10_init_search_range(int size) {
 
 static INLINE int mv_cost(const MV *mv, const int *joint_cost,
                           int *const comp_cost[2]) {
-  return joint_cost[vp10_get_mv_joint(mv)] + comp_cost[0][mv->row] +
+  return joint_cost[av1_get_mv_joint(mv)] + comp_cost[0][mv->row] +
          comp_cost[1][mv->col];
 }
 
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
                      int *mvcost[2], int weight) {
   const MV diff = { mv->row - ref->row, mv->col - ref->col };
   return ROUND_POWER_OF_TWO(mv_cost(&diff, mvjcost, mvcost) * weight, 7);
@@ -98,7 +98,7 @@ static int mvsad_err_cost(const MACROBLOCK *x, const MV *mv, const MV *ref,
       VP9_PROB_COST_SHIFT);
 }
 
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride) {
   int len, ss_count = 1;
 
   cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -119,7 +119,7 @@ void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride) {
   cfg->searches_per_step = 4;
 }
 
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride) {
+void av1_init3smotion_compensation(search_site_config *cfg, int stride) {
   int len, ss_count = 1;
 
   cfg->ss[0].mv.col = cfg->ss[0].mv.row = 0;
@@ -296,7 +296,7 @@ static unsigned int setup_center_error(
     int y_stride, const uint8_t *second_pred, int w, int h, int offset,
     int *mvjcost, int *mvcost[2], unsigned int *sse1, int *distortion) {
   unsigned int besterr;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (second_pred != NULL) {
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       DECLARE_ALIGNED(16, uint16_t, comp_pred16[64 * 64]);
@@ -325,7 +325,7 @@ static unsigned int setup_center_error(
   }
   *distortion = besterr;
   besterr += mv_err_cost(bestmv, ref_mv, mvjcost, mvcost, error_per_bit);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   return besterr;
 }
 
@@ -353,7 +353,7 @@ static void get_cost_surf_min(int *cost_list, int *ir, int *ic, int bits) {
                          (cost_list[4] - 2 * cost_list[0] + cost_list[2]));
 }
 
-int vp10_find_best_sub_pixel_tree_pruned_evenmore(
+int av1_find_best_sub_pixel_tree_pruned_evenmore(
     const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
     int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
     int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
@@ -404,7 +404,7 @@ int vp10_find_best_sub_pixel_tree_pruned_evenmore(
   tr = br;
   tc = bc;
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     hstep >>= 1;
     FIRST_LEVEL_CHECKS;
     if (eighthiters > 1) {
@@ -422,7 +422,7 @@ int vp10_find_best_sub_pixel_tree_pruned_evenmore(
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree_pruned_more(
+int av1_find_best_sub_pixel_tree_pruned_more(
     const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
     int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
     int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
@@ -462,7 +462,7 @@ int vp10_find_best_sub_pixel_tree_pruned_more(
     }
   }
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     tr = br;
     tc = bc;
     hstep >>= 1;
@@ -486,7 +486,7 @@ int vp10_find_best_sub_pixel_tree_pruned_more(
   return besterr;
 }
 
-int vp10_find_best_sub_pixel_tree_pruned(
+int av1_find_best_sub_pixel_tree_pruned(
     const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
     int error_per_bit, const aom_variance_fn_ptr_t *vfp, int forced_stop,
     int iters_per_step, int *cost_list, int *mvjcost, int *mvcost[2],
@@ -548,7 +548,7 @@ int vp10_find_best_sub_pixel_tree_pruned(
     tc = bc;
   }
 
-  if (allow_hp && vp10_use_mv_hp(ref_mv) && forced_stop == 0) {
+  if (allow_hp && av1_use_mv_hp(ref_mv) && forced_stop == 0) {
     hstep >>= 1;
     FIRST_LEVEL_CHECKS;
     if (eighthiters > 1) {
@@ -578,7 +578,7 @@ static const MV search_step_table[12] = {
   { -2, 0 }, { 2, 0 }, { 0, -1 }, { 0, 1 }, { -1, 0 }, { 1, 0 }
 };
 
-int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
+int av1_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
                                   const MV *ref_mv, int allow_hp,
                                   int error_per_bit,
                                   const aom_variance_fn_ptr_t *vfp,
@@ -614,7 +614,7 @@ int vp10_find_best_sub_pixel_tree(const MACROBLOCK *x, MV *bestmv,
   unsigned int cost_array[5];
   int kr, kc;
 
-  if (!(allow_hp && vp10_use_mv_hp(ref_mv)))
+  if (!(allow_hp && av1_use_mv_hp(ref_mv)))
     if (round == 3) round = 2;
 
   bestmv->row *= 8;
@@ -801,7 +801,7 @@ static INLINE void calc_int_cost_list(const MACROBLOCK *x, const MV *ref_mv,
 // candidates as indicated in the num_candidates and candidates arrays
 // passed into this function
 //
-static int vp10_pattern_search(
+static int av1_pattern_search(
     const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
     int do_init_search, int *cost_list, const aom_variance_fn_ptr_t *vfp,
     int use_mvcost, const MV *center_mv, MV *best_mv,
@@ -970,7 +970,7 @@ static int vp10_pattern_search(
 // are 4 1-away neighbors, and cost_list is non-null
 // TODO(debargha): Merge this function with the one above. Also remove
 // use_mvcost option since it is always 1, to save unnecessary branches.
-static int vp10_pattern_search_sad(
+static int av1_pattern_search_sad(
     const MACROBLOCK *x, MV *ref_mv, int search_param, int sad_per_bit,
     int do_init_search, int *cost_list, const aom_variance_fn_ptr_t *vfp,
     int use_mvcost, const MV *center_mv, MV *best_mv,
@@ -1246,7 +1246,7 @@ static int vp10_pattern_search_sad(
   return bestsad;
 }
 
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
                         const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
                         int use_mvcost) {
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1262,7 +1262,7 @@ int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
                      : 0);
 }
 
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
                            const MV *center_mv, const uint8_t *second_pred,
                            const aom_variance_fn_ptr_t *vfp, int use_mvcost) {
   const MACROBLOCKD *const xd = &x->e_mbd;
@@ -1278,7 +1278,7 @@ int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
                      : 0);
 }
 
-int vp10_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
                     int sad_per_bit, int do_init_search, int *cost_list,
                     const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                     const MV *center_mv, MV *best_mv) {
@@ -1337,12 +1337,12 @@ int vp10_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
       { -512, 1024 },
       { -1024, 0 } },
   };
-  return vp10_pattern_search(
+  return av1_pattern_search(
       x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
       use_mvcost, center_mv, best_mv, hex_num_candidates, hex_candidates);
 }
 
-int vp10_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
                        int sad_per_bit, int do_init_search, int *cost_list,
                        const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                        const MV *center_mv, MV *best_mv) {
@@ -1436,12 +1436,12 @@ int vp10_bigdia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
           { -512, 512 },
           { -1024, 0 } },
       };
-  return vp10_pattern_search_sad(
+  return av1_pattern_search_sad(
       x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
       use_mvcost, center_mv, best_mv, bigdia_num_candidates, bigdia_candidates);
 }
 
-int vp10_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
                        int sad_per_bit, int do_init_search, int *cost_list,
                        const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                        const MV *center_mv, MV *best_mv) {
@@ -1541,26 +1541,26 @@ int vp10_square_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
           { -1024, 1024 },
           { -1024, 0 } },
       };
-  return vp10_pattern_search(
+  return av1_pattern_search(
       x, ref_mv, search_param, sad_per_bit, do_init_search, cost_list, vfp,
       use_mvcost, center_mv, best_mv, square_num_candidates, square_candidates);
 }
 
-int vp10_fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_fast_hex_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
                          int sad_per_bit,
                          int do_init_search,  // must be zero for fast_hex
                          int *cost_list, const aom_variance_fn_ptr_t *vfp,
                          int use_mvcost, const MV *center_mv, MV *best_mv) {
-  return vp10_hex_search(
+  return av1_hex_search(
       x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
       do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
 }
 
-int vp10_fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
+int av1_fast_dia_search(const MACROBLOCK *x, MV *ref_mv, int search_param,
                          int sad_per_bit, int do_init_search, int *cost_list,
                          const aom_variance_fn_ptr_t *vfp, int use_mvcost,
                          const MV *center_mv, MV *best_mv) {
-  return vp10_bigdia_search(
+  return av1_bigdia_search(
       x, ref_mv, VPXMAX(MAX_MVSEARCH_STEPS - 2, search_param), sad_per_bit,
       do_init_search, cost_list, vfp, use_mvcost, center_mv, best_mv);
 }
@@ -1655,7 +1655,7 @@ static int exhuastive_mesh_search(const MACROBLOCK *x, MV *ref_mv, MV *best_mv,
   return best_sad;
 }
 
-int vp10_diamond_search_sad_c(const MACROBLOCK *x,
+int av1_diamond_search_sad_c(const MACROBLOCK *x,
                               const search_site_config *cfg, MV *ref_mv,
                               MV *best_mv, int search_param, int sad_per_bit,
                               int *num00, const aom_variance_fn_ptr_t *fn_ptr,
@@ -1865,7 +1865,7 @@ static const MV search_pos[4] = {
   { -1, 0 }, { 0, -1 }, { 0, 1 }, { 1, 0 },
 };
 
-unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
+unsigned int av1_int_pro_motion_estimation(const AV1_COMP *cpi, MACROBLOCK *x,
                                             BLOCK_SIZE bsize, int mi_row,
                                             int mi_col) {
   MACROBLOCKD *xd = &x->e_mbd;
@@ -1888,7 +1888,7 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
   MV this_mv;
   const int norm_factor = 3 + (bw >> 5);
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
-      vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
+      av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]);
 
   if (scaled_ref_frame) {
     int i;
@@ -1896,10 +1896,10 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
     // match the resolution of the current frame, allowing the existing
     // motion search code to be used without additional modifications.
     for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
-    vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+    av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   {
     unsigned int this_sad;
     tmp_mv->row = 0;
@@ -1997,7 +1997,7 @@ unsigned int vp10_int_pro_motion_estimation(const VP10_COMP *cpi, MACROBLOCK *x,
 /* do_refine: If last step (1-away) of n-step search doesn't pick the center
               point as the best match, we will do a final 1-away diamond
               refining search  */
-int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
+int av1_full_pixel_diamond(const AV1_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
                             int step_param, int sadpb, int further_steps,
                             int do_refine, int *cost_list,
                             const aom_variance_fn_ptr_t *fn_ptr,
@@ -2007,7 +2007,7 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
   int bestsme = cpi->diamond_search_sad(x, &cpi->ss_cfg, mvp_full, &temp_mv,
                                         step_param, sadpb, &n, fn_ptr, ref_mv);
   if (bestsme < INT_MAX)
-    bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+    bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
   *dst_mv = temp_mv;
 
   // If there won't be more n-step search, check to see if refining search is
@@ -2024,7 +2024,7 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
                                         step_param + n, sadpb, &num00, fn_ptr,
                                         ref_mv);
       if (thissme < INT_MAX)
-        thissme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+        thissme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
 
       // check to see if refining search is needed.
       if (num00 > further_steps - n) do_refine = 0;
@@ -2040,10 +2040,10 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
   if (do_refine) {
     const int search_range = 8;
     MV best_mv = *dst_mv;
-    thissme = vp10_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
+    thissme = av1_refining_search_sad(x, &best_mv, sadpb, search_range, fn_ptr,
                                        ref_mv);
     if (thissme < INT_MAX)
-      thissme = vp10_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
+      thissme = av1_get_mvpred_var(x, &best_mv, ref_mv, fn_ptr, 1);
     if (thissme < bestsme) {
       bestsme = thissme;
       *dst_mv = best_mv;
@@ -2062,7 +2062,7 @@ int vp10_full_pixel_diamond(const VP10_COMP *cpi, MACROBLOCK *x, MV *mvp_full,
 #define MIN_INTERVAL 1
 // Runs a limited range exhaustive mesh search using a pattern set
 // according to the encode speed profile.
-static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
+static int full_pixel_exhaustive(AV1_COMP *cpi, MACROBLOCK *x,
                                  MV *centre_mv_full, int sadpb, int *cost_list,
                                  const aom_variance_fn_ptr_t *fn_ptr,
                                  const MV *ref_mv, MV *dst_mv) {
@@ -2109,7 +2109,7 @@ static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
   }
 
   if (bestsme < INT_MAX)
-    bestsme = vp10_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
+    bestsme = av1_get_mvpred_var(x, &temp_mv, ref_mv, fn_ptr, 1);
   *dst_mv = temp_mv;
 
   // Return cost list.
@@ -2119,7 +2119,7 @@ static int full_pixel_exhaustive(VP10_COMP *cpi, MACROBLOCK *x,
   return bestsme;
 }
 
-int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
+int av1_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
                            int sad_per_bit, int distance,
                            const aom_variance_fn_ptr_t *fn_ptr,
                            const MV *center_mv, MV *best_mv) {
@@ -2154,7 +2154,7 @@ int vp10_full_search_sad_c(const MACROBLOCK *x, const MV *ref_mv,
   return best_sad;
 }
 
-int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
+int av1_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
                            int sad_per_bit, int distance,
                            const aom_variance_fn_ptr_t *fn_ptr,
                            const MV *center_mv, MV *best_mv) {
@@ -2220,7 +2220,7 @@ int vp10_full_search_sadx3(const MACROBLOCK *x, const MV *ref_mv,
   return best_sad;
 }
 
-int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
+int av1_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
                            int sad_per_bit, int distance,
                            const aom_variance_fn_ptr_t *fn_ptr,
                            const MV *center_mv, MV *best_mv) {
@@ -2310,7 +2310,7 @@ int vp10_full_search_sadx8(const MACROBLOCK *x, const MV *ref_mv,
   return best_sad;
 }
 
-int vp10_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
+int av1_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
                              int search_range,
                              const aom_variance_fn_ptr_t *fn_ptr,
                              const MV *center_mv) {
@@ -2385,7 +2385,7 @@ int vp10_refining_search_sad(const MACROBLOCK *x, MV *ref_mv, int error_per_bit,
 
 // This function is called when we do joint motion search in comp_inter_inter
 // mode.
-int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+int av1_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
                               int error_per_bit, int search_range,
                               const aom_variance_fn_ptr_t *fn_ptr,
                               const MV *center_mv, const uint8_t *second_pred) {
@@ -2433,7 +2433,7 @@ int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
 }
 
 #define MIN_EX_SEARCH_LIMIT 128
-static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
+static int is_exhaustive_allowed(AV1_COMP *cpi, MACROBLOCK *x) {
   const SPEED_FEATURES *const sf = &cpi->sf;
   const int max_ex =
       VPXMAX(MIN_EX_SEARCH_LIMIT,
@@ -2444,7 +2444,7 @@ static int is_exhaustive_allowed(VP10_COMP *cpi, MACROBLOCK *x) {
          (*x->ex_search_count_ptr <= max_ex) && !cpi->rc.is_src_frame_alt_ref;
 }
 
-int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+int av1_full_pixel_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                            MV *mvp_full, int step_param, int error_per_bit,
                            int *cost_list, const MV *ref_mv, MV *tmp_mv,
                            int var_max, int rd) {
@@ -2465,27 +2465,27 @@ int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
 
   switch (method) {
     case FAST_DIAMOND:
-      var = vp10_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
+      var = av1_fast_dia_search(x, mvp_full, step_param, error_per_bit, 0,
                                  cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case FAST_HEX:
-      var = vp10_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
+      var = av1_fast_hex_search(x, mvp_full, step_param, error_per_bit, 0,
                                  cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case HEX:
-      var = vp10_hex_search(x, mvp_full, step_param, error_per_bit, 1,
+      var = av1_hex_search(x, mvp_full, step_param, error_per_bit, 1,
                             cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case SQUARE:
-      var = vp10_square_search(x, mvp_full, step_param, error_per_bit, 1,
+      var = av1_square_search(x, mvp_full, step_param, error_per_bit, 1,
                                cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case BIGDIA:
-      var = vp10_bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
+      var = av1_bigdia_search(x, mvp_full, step_param, error_per_bit, 1,
                                cost_list, fn_ptr, 1, ref_mv, tmp_mv);
       break;
     case NSTEP:
-      var = vp10_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
+      var = av1_full_pixel_diamond(cpi, x, mvp_full, step_param, error_per_bit,
                                     MAX_MVSEARCH_STEPS - 1 - step_param, 1,
                                     cost_list, fn_ptr, ref_mv, tmp_mv);
 
@@ -2515,7 +2515,7 @@ int vp10_full_pixel_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
   }
 
   if (method != NSTEP && rd && var < var_max)
-    var = vp10_get_mvpred_var(x, tmp_mv, ref_mv, fn_ptr, 1);
+    var = av1_get_mvpred_var(x, tmp_mv, ref_mv, fn_ptr, 1);
 
   return var;
 }
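
Before the header changes below, a quick illustration of the shared idea behind the renamed integer searches (av1_hex_search, av1_bigdia_search, av1_square_search and the av1_full_pixel_search switch above): evaluate a fixed set of candidate offsets around the current best integer MV and keep moving while a candidate improves the cost. The following is a minimal standalone sketch of that idea only; the offsets, the cost model and the stopping rule are invented for the example and are not libaom's.

/* Toy illustration of the candidate-pattern search idea.  NOT libaom code:
 * the pattern, the cost function and the stopping rule are made up. */
#include <stdio.h>

typedef struct { int row, col; } ToyMV;

/* Stand-in cost: squared distance to a hidden "true" motion vector. */
static int toy_cost(ToyMV mv, ToyMV target) {
  const int dr = mv.row - target.row;
  const int dc = mv.col - target.col;
  return dr * dr + dc * dc;
}

static ToyMV toy_pattern_search(ToyMV start, ToyMV target) {
  static const ToyMV pattern[6] = { { -1, -2 }, { 1, -2 }, { 2, 0 },
                                    { 1, 2 },   { -1, 2 }, { -2, 0 } };
  ToyMV best = start;
  int best_cost = toy_cost(best, target);
  for (;;) {
    ToyMV next = best;
    int improved = 0;
    for (int i = 0; i < 6; ++i) {
      const ToyMV cand = { best.row + pattern[i].row,
                           best.col + pattern[i].col };
      const int c = toy_cost(cand, target);
      if (c < best_cost) {
        best_cost = c;
        next = cand;
        improved = 1;
      }
    }
    if (!improved) return best;  /* no candidate improved: local best found */
    best = next;
  }
}

int main(void) {
  const ToyMV start = { 0, 0 }, target = { 7, -5 };
  const ToyMV best = toy_pattern_search(start, target);
  printf("best integer MV: (%d, %d)\n", best.row, best.col);
  return 0;
}
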
diff --git a/av1/encoder/mcomp.h b/av1/encoder/mcomp.h
index d6032889ae1a4b34d96e9c1fb19d206e89bae4b8..f3516c223142ab9c4fed14a2c593dd59ed44a61a 100644
--- a/av1/encoder/mcomp.h
+++ b/av1/encoder/mcomp.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_MCOMP_H_
-#define VP10_ENCODER_MCOMP_H_
+#ifndef AV1_ENCODER_MCOMP_H_
+#define AV1_ENCODER_MCOMP_H_
 
 #include "av1/encoder/block.h"
 #include "aom_dsp/variance.h"
@@ -43,40 +43,40 @@ typedef struct search_site_config {
   int searches_per_step;
 } search_site_config;
 
-void vp10_init_dsmotion_compensation(search_site_config *cfg, int stride);
-void vp10_init3smotion_compensation(search_site_config *cfg, int stride);
+void av1_init_dsmotion_compensation(search_site_config *cfg, int stride);
+void av1_init3smotion_compensation(search_site_config *cfg, int stride);
 
-void vp10_set_mv_search_range(MACROBLOCK *x, const MV *mv);
-int vp10_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
+void av1_set_mv_search_range(MACROBLOCK *x, const MV *mv);
+int av1_mv_bit_cost(const MV *mv, const MV *ref, const int *mvjcost,
                      int *mvcost[2], int weight);
 
 // Utility to compute variance + MV rate cost for a given MV
-int vp10_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_var(const MACROBLOCK *x, const MV *best_mv,
                         const MV *center_mv, const aom_variance_fn_ptr_t *vfp,
                         int use_mvcost);
-int vp10_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
+int av1_get_mvpred_av_var(const MACROBLOCK *x, const MV *best_mv,
                            const MV *center_mv, const uint8_t *second_pred,
                            const aom_variance_fn_ptr_t *vfp, int use_mvcost);
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct SPEED_FEATURES;
 
-int vp10_init_search_range(int size);
+int av1_init_search_range(int size);
 
-int vp10_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
+int av1_refining_search_sad(const struct macroblock *x, struct mv *ref_mv,
                              int sad_per_bit, int distance,
                              const struct aom_variance_vtable *fn_ptr,
                              const struct mv *center_mv);
 
 // Runs sequence of diamond searches in smaller steps for RD.
-int vp10_full_pixel_diamond(const struct VP10_COMP *cpi, MACROBLOCK *x,
+int av1_full_pixel_diamond(const struct AV1_COMP *cpi, MACROBLOCK *x,
                             MV *mvp_full, int step_param, int sadpb,
                             int further_steps, int do_refine, int *cost_list,
                             const aom_variance_fn_ptr_t *fn_ptr,
                             const MV *ref_mv, MV *dst_mv);
 
 // Perform integral projection based motion estimation.
-unsigned int vp10_int_pro_motion_estimation(const struct VP10_COMP *cpi,
+unsigned int av1_int_pro_motion_estimation(const struct AV1_COMP *cpi,
                                             MACROBLOCK *x, BLOCK_SIZE bsize,
                                             int mi_row, int mi_col);
 
@@ -87,11 +87,11 @@ typedef int(integer_mv_pattern_search_fn)(const MACROBLOCK *x, MV *ref_mv,
                                           int use_mvcost, const MV *center_mv,
                                           MV *best_mv);
 
-integer_mv_pattern_search_fn vp10_hex_search;
-integer_mv_pattern_search_fn vp10_bigdia_search;
-integer_mv_pattern_search_fn vp10_square_search;
-integer_mv_pattern_search_fn vp10_fast_hex_search;
-integer_mv_pattern_search_fn vp10_fast_dia_search;
+integer_mv_pattern_search_fn av1_hex_search;
+integer_mv_pattern_search_fn av1_bigdia_search;
+integer_mv_pattern_search_fn av1_square_search;
+integer_mv_pattern_search_fn av1_fast_hex_search;
+integer_mv_pattern_search_fn av1_fast_dia_search;
 
 typedef int(fractional_mv_step_fp)(
     const MACROBLOCK *x, MV *bestmv, const MV *ref_mv, int allow_hp,
@@ -101,34 +101,34 @@ typedef int(fractional_mv_step_fp)(
     int *distortion, unsigned int *sse1, const uint8_t *second_pred, int w,
     int h);
 
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_more;
-extern fractional_mv_step_fp vp10_find_best_sub_pixel_tree_pruned_evenmore;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_more;
+extern fractional_mv_step_fp av1_find_best_sub_pixel_tree_pruned_evenmore;
 
-typedef int (*vp10_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
+typedef int (*av1_full_search_fn_t)(const MACROBLOCK *x, const MV *ref_mv,
                                      int sad_per_bit, int distance,
                                      const aom_variance_fn_ptr_t *fn_ptr,
                                      const MV *center_mv, MV *best_mv);
 
-typedef int (*vp10_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
+typedef int (*av1_refining_search_fn_t)(const MACROBLOCK *x, MV *ref_mv,
                                          int sad_per_bit, int distance,
                                          const aom_variance_fn_ptr_t *fn_ptr,
                                          const MV *center_mv);
 
-typedef int (*vp10_diamond_search_fn_t)(
+typedef int (*av1_diamond_search_fn_t)(
     const MACROBLOCK *x, const search_site_config *cfg, MV *ref_mv, MV *best_mv,
     int search_param, int sad_per_bit, int *num00,
     const aom_variance_fn_ptr_t *fn_ptr, const MV *center_mv);
 
-int vp10_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
+int av1_refining_search_8p_c(const MACROBLOCK *x, MV *ref_mv,
                               int error_per_bit, int search_range,
                               const aom_variance_fn_ptr_t *fn_ptr,
                               const MV *center_mv, const uint8_t *second_pred);
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
+int av1_full_pixel_search(struct AV1_COMP *cpi, MACROBLOCK *x,
                            BLOCK_SIZE bsize, MV *mvp_full, int step_param,
                            int error_per_bit, int *cost_list, const MV *ref_mv,
                            MV *tmp_mv, int var_max, int rd);
@@ -137,4 +137,4 @@ int vp10_full_pixel_search(struct VP10_COMP *cpi, MACROBLOCK *x,
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_MCOMP_H_
+#endif  // AV1_ENCODER_MCOMP_H_
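
The header keeps the same structure after the rename: every concrete search is declared to match one of a handful of function-pointer typedefs (integer_mv_pattern_search_fn, av1_full_search_fn_t, av1_refining_search_fn_t, av1_diamond_search_fn_t), so a routine can be chosen once and then invoked through a single pointer. A minimal, self-contained sketch of that dispatch pattern, with toy names invented for the example:

#include <stdio.h>

/* Toy stand-in for a search-function typedef: any routine with this
 * signature can be stored behind the same pointer. */
typedef int (*toy_search_fn_t)(int center, int range);

static int toy_coarse_search(int center, int range) { return center - range; }
static int toy_fine_search(int center, int range) { return center + range / 4; }

int main(void) {
  /* Chosen once, e.g. from a speed/quality trade-off, then used generically. */
  toy_search_fn_t search = toy_fine_search;
  printf("fine:   %d\n", search(100, 16));
  search = toy_coarse_search;
  printf("coarse: %d\n", search(100, 16));
  return 0;
}
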
diff --git a/av1/encoder/mips/msa/error_msa.c b/av1/encoder/mips/msa/error_msa.c
index 6707976b1c120d022e5e14051e489fe40cd11524..1890528d063b99e7cb32672f8a6965e6bfb863d1 100644
--- a/av1/encoder/mips/msa/error_msa.c
+++ b/av1/encoder/mips/msa/error_msa.c
@@ -87,7 +87,7 @@ BLOCK_ERROR_BLOCKSIZE_MSA(256)
 BLOCK_ERROR_BLOCKSIZE_MSA(1024)
 /* clang-format on */
 
-int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
+int64_t av1_block_error_msa(const tran_low_t *coeff_ptr,
                              const tran_low_t *dq_coeff_ptr, intptr_t blk_size,
                              int64_t *ssz) {
   int64_t err;
@@ -100,7 +100,7 @@ int64_t vp10_block_error_msa(const tran_low_t *coeff_ptr,
     case 256: err = block_error_256size_msa(coeff, dq_coeff, ssz); break;
     case 1024: err = block_error_1024size_msa(coeff, dq_coeff, ssz); break;
     default:
-      err = vp10_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
+      err = av1_block_error_c(coeff_ptr, dq_coeff_ptr, blk_size, ssz);
       break;
   }
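
For context on what the renamed MSA kernel specializes: in libvpx/libaom-derived code a block-error routine with this signature conventionally returns the summed squared difference between original and dequantized transform coefficients and reports the source energy through *ssz, with the fixed-size MSA paths handling the common block sizes and everything else falling back to av1_block_error_c, as the switch above shows. A plain-C sketch under that assumption (not the library implementation):

#include <stdint.h>
#include <stdio.h>

typedef int32_t toy_tran_low_t; /* the real tran_low_t depends on the build */

static int64_t toy_block_error(const toy_tran_low_t *coeff,
                               const toy_tran_low_t *dqcoeff,
                               intptr_t block_size, int64_t *ssz) {
  int64_t error = 0, sqcoeff = 0;
  for (intptr_t i = 0; i < block_size; ++i) {
    const int64_t diff = (int64_t)coeff[i] - dqcoeff[i];
    error += diff * diff;                      /* distortion after dequant */
    sqcoeff += (int64_t)coeff[i] * coeff[i];   /* source coefficient energy */
  }
  *ssz = sqcoeff;
  return error;
}

int main(void) {
  const toy_tran_low_t coeff[4] = { 10, -3, 0, 7 };
  const toy_tran_low_t dqcoeff[4] = { 8, -4, 0, 7 };
  int64_t ssz;
  const int64_t err = toy_block_error(coeff, dqcoeff, 4, &ssz);
  printf("err=%lld ssz=%lld\n", (long long)err, (long long)ssz);
  return 0;
}
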
 
diff --git a/av1/encoder/mips/msa/fdct16x16_msa.c b/av1/encoder/mips/msa/fdct16x16_msa.c
index 2664dd6aef32f3ecd59a39a7311983e41421c285..469b0f9f77d037bed15181f68a1f093fee61c505 100644
--- a/av1/encoder/mips/msa/fdct16x16_msa.c
+++ b/av1/encoder/mips/msa/fdct16x16_msa.c
@@ -404,7 +404,7 @@ static void postproc_fdct16x8_1d_row(int16_t *intermediate, int16_t *output) {
   ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, out, 16);
 }
 
-void vp10_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
+void av1_fht16x16_msa(const int16_t *input, int16_t *output, int32_t stride,
                        int32_t tx_type) {
   DECLARE_ALIGNED(32, int16_t, tmp[256]);
   DECLARE_ALIGNED(32, int16_t, trans_buf[256]);
diff --git a/av1/encoder/mips/msa/fdct4x4_msa.c b/av1/encoder/mips/msa/fdct4x4_msa.c
index c60c8593e1cfebd2e2cd02fa7adc0162c81149c3..1bc7fe41114cf0d87bcf7c1ef7929fc033654401 100644
--- a/av1/encoder/mips/msa/fdct4x4_msa.c
+++ b/av1/encoder/mips/msa/fdct4x4_msa.c
@@ -14,7 +14,7 @@
 #include "av1/common/enums.h"
 #include "av1/encoder/mips/msa/fdct_msa.h"
 
-void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
+void av1_fwht4x4_msa(const int16_t *input, int16_t *output,
                       int32_t src_stride) {
   v8i16 in0, in1, in2, in3, in4;
 
@@ -46,7 +46,7 @@ void vp10_fwht4x4_msa(const int16_t *input, int16_t *output,
   ST4x2_UB(in2, output + 12, 4);
 }
 
-void vp10_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
+void av1_fht4x4_msa(const int16_t *input, int16_t *output, int32_t stride,
                      int32_t tx_type) {
   v8i16 in0, in1, in2, in3;
 
diff --git a/av1/encoder/mips/msa/fdct8x8_msa.c b/av1/encoder/mips/msa/fdct8x8_msa.c
index 114cab55a3e34abf39fa7d8a514e1dbdc5acb393..7a2bde6d5077b957cf525907c73bfbdf42911f38 100644
--- a/av1/encoder/mips/msa/fdct8x8_msa.c
+++ b/av1/encoder/mips/msa/fdct8x8_msa.c
@@ -14,7 +14,7 @@
 #include "av1/common/enums.h"
 #include "av1/encoder/mips/msa/fdct_msa.h"
 
-void vp10_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
+void av1_fht8x8_msa(const int16_t *input, int16_t *output, int32_t stride,
                      int32_t tx_type) {
   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
 
diff --git a/av1/encoder/mips/msa/fdct_msa.h b/av1/encoder/mips/msa/fdct_msa.h
index 37fe131a49d52fe87510b4619b4a8b2931ccc38b..7f02df0bee5cf9f0dbcdcc4815ae9e0f91638f32 100644
--- a/av1/encoder/mips/msa/fdct_msa.h
+++ b/av1/encoder/mips/msa/fdct_msa.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
-#define VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#ifndef AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
+#define AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
 
 #include "aom_dsp/mips/fwd_txfm_msa.h"
 #include "aom_dsp/mips/txfm_macros_msa.h"
@@ -114,4 +114,4 @@
     PCKEV_H4_SH(in0_r_m, in0_r_m, in1_r_m, in1_r_m, s2_m, s2_m, s3_m, s3_m, \
                 out0, out1, out2, out3);                                    \
   }
-#endif  // VP10_ENCODER_MIPS_MSA_VP10_FDCT_MSA_H_
+#endif  // AV1_ENCODER_MIPS_MSA_AV1_FDCT_MSA_H_
diff --git a/av1/encoder/mips/msa/temporal_filter_msa.c b/av1/encoder/mips/msa/temporal_filter_msa.c
index d690a3e373105e389de9718cca580c405cc6ab26..9f0c8b7fb151bd30a8d9821228bdc23f066e75b6 100644
--- a/av1/encoder/mips/msa/temporal_filter_msa.c
+++ b/av1/encoder/mips/msa/temporal_filter_msa.c
@@ -266,7 +266,7 @@ static void temporal_filter_apply_16size_msa(uint8_t *frm1_ptr, uint32_t stride,
   }
 }
 
-void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
+void av1_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
                                     uint8_t *frame2_ptr, uint32_t blk_w,
                                     uint32_t blk_h, int32_t strength,
                                     int32_t filt_wgt, uint32_t *accu,
@@ -278,7 +278,7 @@ void vp10_temporal_filter_apply_msa(uint8_t *frame1_ptr, uint32_t stride,
     temporal_filter_apply_16size_msa(frame1_ptr, stride, frame2_ptr, strength,
                                      filt_wgt, accu, cnt);
   } else {
-    vp10_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
+    av1_temporal_filter_apply_c(frame1_ptr, stride, frame2_ptr, blk_w, blk_h,
                                  strength, filt_wgt, accu, cnt);
   }
 }
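
The wrapper above follows the usual SIMD dispatch convention: run the vector kernel only for the block sizes it was written for (16-wide here) and defer to the generic C routine otherwise. A self-contained sketch of that convention, with all names invented for the example:

#include <stdio.h>

/* Generic path: always correct, works for any width. */
static int toy_sum_row_c(const unsigned char *row, int width) {
  int s = 0;
  for (int i = 0; i < width; ++i) s += row[i];
  return s;
}

/* "Optimized" path: pretend this is a SIMD kernel that only handles width 16. */
static int toy_sum_row_fast16(const unsigned char *row) {
  int s = 0;
  for (int i = 0; i < 16; ++i) s += row[i];  /* stand-in for vector code */
  return s;
}

/* Wrapper in the spirit of av1_temporal_filter_apply_msa: take the fast path
 * when the size is supported, otherwise fall back to the C implementation. */
static int toy_sum_row(const unsigned char *row, int width) {
  if (width == 16) return toy_sum_row_fast16(row);
  return toy_sum_row_c(row, width);
}

int main(void) {
  unsigned char row[20];
  for (int i = 0; i < 20; ++i) row[i] = (unsigned char)i;
  printf("%d %d\n", toy_sum_row(row, 16), toy_sum_row(row, 20));
  return 0;
}
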
diff --git a/av1/encoder/pickdering.c b/av1/encoder/pickdering.c
index fd9d62de65b1312d6a3960078b23d95cf3f9356f..05c0877606b68a73ecf6a68ab3676ab5719841bb 100644
--- a/av1/encoder/pickdering.c
+++ b/av1/encoder/pickdering.c
@@ -33,8 +33,8 @@ static double compute_dist(int16_t *x, int xstride, int16_t *y, int ystride,
   return sum/(double)(1 << 2*coeff_shift);
 }
 
-int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
-                       VP10_COMMON *cm,
+int av1_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
+                       AV1_COMMON *cm,
                        MACROBLOCKD *xd) {
   int r, c;
   int sbr, sbc;
@@ -58,7 +58,7 @@ int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
   src = aom_malloc(sizeof(*src)*cm->mi_rows*cm->mi_cols*64);
   ref_coeff = aom_malloc(sizeof(*ref_coeff)*cm->mi_rows*cm->mi_cols*64);
   bskip = aom_malloc(sizeof(*bskip)*cm->mi_rows*cm->mi_cols);
-  vp10_setup_dst_planes(xd->plane, frame, 0, 0);
+  av1_setup_dst_planes(xd->plane, frame, 0, 0);
   for (pli = 0; pli < 3; pli++) {
     dec[pli] = xd->plane[pli].subsampling_x;
     bsize[pli] = 8 >> dec[pli];
@@ -66,7 +66,7 @@ int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
   stride = bsize[0]*cm->mi_cols;
   for (r = 0; r < bsize[0]*cm->mi_rows; ++r) {
     for (c = 0; c < bsize[0]*cm->mi_cols; ++c) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (cm->use_highbitdepth) {
         src[r * stride + c] =
             CONVERT_TO_SHORTPTR(xd->plane[0].dst.buf)
@@ -78,7 +78,7 @@ int vp10_dering_search(YV12_BUFFER_CONFIG *frame, const YV12_BUFFER_CONFIG *ref,
         src[r * stride + c] =
             xd->plane[0].dst.buf[r*xd->plane[0].dst.stride + c];
         ref_coeff[r * stride + c] = ref->y_buffer[r * ref->y_stride + c];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       }
 #endif
     }
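
The guard rename in pickdering.c leaves the read pattern intact: for high-bit-depth frames the nominally uint8_t plane buffer actually holds 16-bit samples and must be reinterpreted before being copied into the int16_t working arrays, while 8-bit frames are read directly. A standalone sketch of that pattern; the helper below is invented for illustration and does not reproduce libaom's CONVERT_TO_SHORTPTR mechanics:

#include <stdint.h>
#include <stdio.h>

/* Copy one row of pixels into a 16-bit working buffer, branching on whether
 * the source holds 8-bit bytes or 16-bit samples behind a byte pointer. */
static void toy_row_to_int16(int16_t *dst, const uint8_t *src, int n,
                             int use_highbitdepth) {
  if (use_highbitdepth) {
    const uint16_t *src16 = (const uint16_t *)(const void *)src;
    for (int i = 0; i < n; ++i) dst[i] = (int16_t)src16[i];
  } else {
    for (int i = 0; i < n; ++i) dst[i] = src[i];
  }
}

int main(void) {
  uint16_t hbd_row[4] = { 1023, 512, 64, 0 };  /* e.g. 10-bit samples */
  uint8_t lbd_row[4] = { 255, 128, 16, 0 };
  int16_t out[4];

  toy_row_to_int16(out, (const uint8_t *)hbd_row, 4, 1);
  printf("hbd: %d %d %d %d\n", out[0], out[1], out[2], out[3]);

  toy_row_to_int16(out, lbd_row, 4, 0);
  printf("lbd: %d %d %d %d\n", out[0], out[1], out[2], out[3]);
  return 0;
}
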
diff --git a/av1/encoder/picklpf.c b/av1/encoder/picklpf.c
index cf3a08fa1203da228f2b0a41e75965a8722a352b..88788dfc95da4a8e9c7c906030e2c781a3679025 100644
--- a/av1/encoder/picklpf.c
+++ b/av1/encoder/picklpf.c
@@ -26,7 +26,7 @@
 #include "av1/encoder/picklpf.h"
 #include "av1/encoder/quantize.h"
 
-static int get_max_filter_level(const VP10_COMP *cpi) {
+static int get_max_filter_level(const AV1_COMP *cpi) {
   if (cpi->oxcf.pass == 2) {
     return cpi->twopass.section_intra_rating > 8 ? MAX_LOOP_FILTER * 3 / 4
                                                  : MAX_LOOP_FILTER;
@@ -36,28 +36,28 @@ static int get_max_filter_level(const VP10_COMP *cpi) {
 }
 
 static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
-                                VP10_COMP *const cpi, int filt_level,
+                                AV1_COMP *const cpi, int filt_level,
                                 int partial_frame) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   int64_t filt_err;
 
   if (cpi->num_workers > 1)
-    vp10_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
+    av1_loop_filter_frame_mt(cm->frame_to_show, cm, cpi->td.mb.e_mbd.plane,
                               filt_level, 1, partial_frame, cpi->workers,
                               cpi->num_workers, &cpi->lf_row_sync);
   else
-    vp10_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
+    av1_loop_filter_frame(cm->frame_to_show, cm, &cpi->td.mb.e_mbd, filt_level,
                            1, partial_frame);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (cm->use_highbitdepth) {
-    filt_err = vp10_highbd_get_y_sse(sd, cm->frame_to_show);
+    filt_err = av1_highbd_get_y_sse(sd, cm->frame_to_show);
   } else {
-    filt_err = vp10_get_y_sse(sd, cm->frame_to_show);
+    filt_err = av1_get_y_sse(sd, cm->frame_to_show);
   }
 #else
-  filt_err = vp10_get_y_sse(sd, cm->frame_to_show);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+  filt_err = av1_get_y_sse(sd, cm->frame_to_show);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Re-instate the unfiltered frame
   aom_yv12_copy_y(&cpi->last_frame_uf, cm->frame_to_show);
@@ -65,9 +65,9 @@ static int64_t try_filter_frame(const YV12_BUFFER_CONFIG *sd,
   return filt_err;
 }
 
-static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+static int search_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
                                int partial_frame) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const struct loopfilter *const lf = &cm->lf;
   const int min_filter_level = 0;
   const int max_filter_level = get_max_filter_level(cpi);
@@ -145,9 +145,9 @@ static int search_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
   return filt_best;
 }
 
-void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
+void av1_pick_filter_level(const YV12_BUFFER_CONFIG *sd, AV1_COMP *cpi,
                             LPF_PICK_METHOD method) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   struct loopfilter *const lf = &cm->lf;
 
   lf->sharpness_level = cm->frame_type == KEY_FRAME ? 0 : cpi->oxcf.sharpness;
@@ -157,10 +157,10 @@ void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
   } else if (method >= LPF_PICK_FROM_Q) {
     const int min_filter_level = 0;
     const int max_filter_level = get_max_filter_level(cpi);
-    const int q = vp10_ac_quant(cm->base_qindex, 0, cm->bit_depth);
+    const int q = av1_ac_quant(cm->base_qindex, 0, cm->bit_depth);
 // These values were determined by linear fitting the result of the
 // searched level, filt_guess = q * 0.316206 + 3.87252
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     int filt_guess;
     switch (cm->bit_depth) {
       case VPX_BITS_8:
@@ -180,7 +180,7 @@ void vp10_pick_filter_level(const YV12_BUFFER_CONFIG *sd, VP10_COMP *cpi,
     }
 #else
     int filt_guess = ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     if (cm->frame_type == KEY_FRAME) filt_guess -= 4;
     lf->filter_level = clamp(filt_guess, min_filter_level, max_filter_level);
   } else {
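
One sanity check on the LPF_PICK_FROM_Q constants above: q is av1_ac_quant(), which at 8 bits is four times the Q reported by av1_convert_qindex_to_q() (see the ratectrl.c hunks later in this patch), and 20723 / 2^18 is approximately 0.316206 / 4 while 1015158 / 2^18 is approximately 3.87252. So the 8-bit integer expression is, up to rounding, the fixed-point form of the quoted linear fit. A standalone snippet that prints both forms side by side; the rounding macro below is assumed to behave like the library's ROUND_POWER_OF_TWO, and the loop is only a rough sweep of 8-bit ac-quant values:

#include <stdio.h>

#define TOY_ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

int main(void) {
  for (int q = 4; q <= 1828; q += 256) {
    const int fixed_pt = TOY_ROUND_POWER_OF_TWO(q * 20723 + 1015158, 18);
    const double floating = (q / 4.0) * 0.316206 + 3.87252;
    printf("q=%4d  fixed=%3d  float=%7.3f\n", q, fixed_pt, floating);
  }
  return 0;
}
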
diff --git a/av1/encoder/picklpf.h b/av1/encoder/picklpf.h
index 428c944c0433a32b8e89b9a37efeeb3d35b75e15..44c9ee5c9dd532651cd24c5e822939c38b598b1a 100644
--- a/av1/encoder/picklpf.h
+++ b/av1/encoder/picklpf.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_PICKLPF_H_
-#define VP10_ENCODER_PICKLPF_H_
+#ifndef AV1_ENCODER_PICKLPF_H_
+#define AV1_ENCODER_PICKLPF_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -19,12 +19,12 @@ extern "C" {
 #include "av1/encoder/encoder.h"
 
 struct yv12_buffer_config;
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_pick_filter_level(const struct yv12_buffer_config *sd,
-                            struct VP10_COMP *cpi, LPF_PICK_METHOD method);
+void av1_pick_filter_level(const struct yv12_buffer_config *sd,
+                           struct AV1_COMP *cpi, LPF_PICK_METHOD method);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_PICKLPF_H_
+#endif  // AV1_ENCODER_PICKLPF_H_
diff --git a/av1/encoder/quantize.c b/av1/encoder/quantize.c
index a0dd2b717ddb3d84429a22f71b7e35a14ca59bed..5ad24566b5ca92a28c4c946a3af476d9e8594228 100644
--- a/av1/encoder/quantize.c
+++ b/av1/encoder/quantize.c
@@ -21,7 +21,7 @@
 #include "av1/encoder/quantize.h"
 #include "av1/encoder/rd.h"
 
-void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                         int skip_block, const int16_t *zbin_ptr,
                         const int16_t *round_ptr, const int16_t *quant_ptr,
                         const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -77,8 +77,8 @@ void vp10_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
                                int skip_block, const int16_t *zbin_ptr,
                                const int16_t *round_ptr,
                                const int16_t *quant_ptr,
@@ -137,7 +137,7 @@ void vp10_highbd_quantize_fp_c(const tran_low_t *coeff_ptr, intptr_t count,
 
 // TODO(jingning) Refactor this file and combine functions with similar
 // operations.
-void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                               int skip_block, const int16_t *zbin_ptr,
                               const int16_t *round_ptr,
                               const int16_t *quant_ptr,
@@ -199,8 +199,8 @@ void vp10_quantize_fp_32x32_c(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
   *eob_ptr = eob + 1;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_quantize_fp_32x32_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_quantize_fp_32x32_c(
     const tran_low_t *coeff_ptr, intptr_t n_coeffs, int skip_block,
     const int16_t *zbin_ptr, const int16_t *round_ptr, const int16_t *quant_ptr,
     const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
@@ -259,7 +259,7 @@ void vp10_highbd_quantize_fp_32x32_c(
 }
 #endif
 
-void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
+void av1_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
                                  const int16_t *scan, const int16_t *iscan) {
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *p = &x->plane[plane];
@@ -271,7 +271,7 @@ void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
   const qm_val_t *iqmatrix = pd->seg_iqmatrix[seg_id][is_intra][0];
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     aom_highbd_quantize_b(BLOCK_OFFSET(p->coeff, block), 16, x->skip_block,
                           p->zbin, p->round, p->quant, p->quant_shift,
@@ -309,8 +309,8 @@ static void invert_quant(int16_t *quant, int16_t *shift, int d) {
 }
 
 static int get_qzbin_factor(int q, aom_bit_depth_t bit_depth) {
-  const int quant = vp10_dc_quant(q, 0, bit_depth);
-#if CONFIG_VPX_HIGHBITDEPTH
+  const int quant = av1_dc_quant(q, 0, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
     case VPX_BITS_8:
       return q == 0 ? 64 : (quant < 148 ? 84 : 80);
@@ -328,8 +328,8 @@ static int get_qzbin_factor(int q, aom_bit_depth_t bit_depth) {
 #endif
 }
 
-void vp10_init_quantizer(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_init_quantizer(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   QUANTS *const quants = &cpi->quants;
   int i, q, quant;
 
@@ -342,8 +342,8 @@ void vp10_init_quantizer(VP10_COMP *cpi) {
       if (q == 0) qrounding_factor_fp = 64;
 
       // y
-      quant = i == 0 ? vp10_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
-                     : vp10_ac_quant(q, 0, cm->bit_depth);
+      quant = i == 0 ? av1_dc_quant(q, cm->y_dc_delta_q, cm->bit_depth)
+                     : av1_ac_quant(q, 0, cm->bit_depth);
       invert_quant(&quants->y_quant[q][i], &quants->y_quant_shift[q][i], quant);
       quants->y_quant_fp[q][i] = (1 << 16) / quant;
       quants->y_round_fp[q][i] = (qrounding_factor_fp * quant) >> 7;
@@ -352,8 +352,8 @@ void vp10_init_quantizer(VP10_COMP *cpi) {
       cpi->y_dequant[q][i] = quant;
 
       // uv
-      quant = i == 0 ? vp10_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
-                     : vp10_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
+      quant = i == 0 ? av1_dc_quant(q, cm->uv_dc_delta_q, cm->bit_depth)
+                     : av1_ac_quant(q, cm->uv_ac_delta_q, cm->bit_depth);
       invert_quant(&quants->uv_quant[q][i], &quants->uv_quant_shift[q][i],
                    quant);
       quants->uv_quant_fp[q][i] = (1 << 16) / quant;
@@ -383,13 +383,13 @@ void vp10_init_quantizer(VP10_COMP *cpi) {
   }
 }
 
-void vp10_init_plane_quantizers(VP10_COMP *cpi, MACROBLOCK *x) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_init_plane_quantizers(AV1_COMP *cpi, MACROBLOCK *x) {
+  const AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   QUANTS *const quants = &cpi->quants;
   const int segment_id = xd->mi[0]->mbmi.segment_id;
-  const int qindex = vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex);
-  const int rdmult = vp10_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
+  const int qindex = av1_get_qindex(&cm->seg, segment_id, cm->base_qindex);
+  const int rdmult = av1_compute_rd_mult(cpi, qindex + cm->y_dc_delta_q);
   int i;
 #if CONFIG_AOM_QM
   int minqm = cm->min_qmlevel;
@@ -443,15 +443,15 @@ void vp10_init_plane_quantizers(VP10_COMP *cpi, MACROBLOCK *x) {
 
   set_error_per_bit(x, rdmult);
 
-  vp10_initialize_me_consts(cpi, x, x->q_index);
+  av1_initialize_me_consts(cpi, x, x->q_index);
 }
 
-void vp10_frame_init_quantizer(VP10_COMP *cpi) {
-  vp10_init_plane_quantizers(cpi, &cpi->td.mb);
+void av1_frame_init_quantizer(AV1_COMP *cpi) {
+  av1_init_plane_quantizers(cpi, &cpi->td.mb);
 }
 
-void vp10_set_quantizer(VP10_COMMON *cm, int q) {
-  // quantizer has to be reinitialized with vp10_init_quantizer() if any
+void av1_set_quantizer(AV1_COMMON *cm, int q) {
+  // quantizer has to be reinitialized with av1_init_quantizer() if any
   // delta_q changes.
   cm->base_qindex = q;
   cm->y_dc_delta_q = 0;
@@ -469,11 +469,11 @@ static const int quantizer_to_qindex[] = {
   208, 212, 216, 220, 224, 228, 232, 236, 240, 244, 249, 255,
 };
 
-int vp10_quantizer_to_qindex(int quantizer) {
+int av1_quantizer_to_qindex(int quantizer) {
   return quantizer_to_qindex[quantizer];
 }
 
-int vp10_qindex_to_quantizer(int qindex) {
+int av1_qindex_to_quantizer(int qindex) {
   int quantizer;
 
   for (quantizer = 0; quantizer < 64; ++quantizer)
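
Alongside each step size, av1_init_quantizer() above stores a 16-bit fixed-point reciprocal ((1 << 16) / quant) and a rounding term, which is the usual reason such reciprocals are precomputed: the fast-path quantizers can trade a per-coefficient division for a multiply and a shift. A self-contained sketch of that trick; the values are invented, and the real quantizers additionally handle the zero bin, per-position quant/shift pairs, sign and eob tracking:

#include <stdio.h>
#include <stdlib.h>

int main(void) {
  const int quant = 48;                 /* illustrative step size */
  const int recip = (1 << 16) / quant;  /* precomputed reciprocal, cf. y_quant_fp */
  const int round_term = quant / 2;     /* simple round-to-nearest term */

  for (int coeff = -200; coeff <= 200; coeff += 37) {
    const int abs_c = abs(coeff);
    const int by_division = (abs_c + round_term) / quant;          /* reference */
    const int by_multiply = ((abs_c + round_term) * recip) >> 16;  /* fast path */
    printf("coeff=%4d  div=%2d  mul=%2d\n", coeff, by_division, by_multiply);
  }
  return 0;
}
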
diff --git a/av1/encoder/quantize.h b/av1/encoder/quantize.h
index a3d252f4e11940b82a2bed16294d66dfc995706a..c1d581051c8b070571893d9c099469ff87a4b8d3 100644
--- a/av1/encoder/quantize.h
+++ b/av1/encoder/quantize.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_QUANTIZE_H_
-#define VP10_ENCODER_QUANTIZE_H_
+#ifndef AV1_ENCODER_QUANTIZE_H_
+#define AV1_ENCODER_QUANTIZE_H_
 
 #include "./aom_config.h"
 #include "av1/common/quant_common.h"
@@ -39,26 +39,26 @@ typedef struct {
   DECLARE_ALIGNED(16, int16_t, uv_round[QINDEX_RANGE][8]);
 } QUANTS;
 
-void vp10_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
+void av1_regular_quantize_b_4x4(MACROBLOCK *x, int plane, int block,
                                  const int16_t *scan, const int16_t *iscan);
 
-struct VP10_COMP;
-struct VP10Common;
+struct AV1_COMP;
+struct AV1Common;
 
-void vp10_frame_init_quantizer(struct VP10_COMP *cpi);
+void av1_frame_init_quantizer(struct AV1_COMP *cpi);
 
-void vp10_init_plane_quantizers(struct VP10_COMP *cpi, MACROBLOCK *x);
+void av1_init_plane_quantizers(struct AV1_COMP *cpi, MACROBLOCK *x);
 
-void vp10_init_quantizer(struct VP10_COMP *cpi);
+void av1_init_quantizer(struct AV1_COMP *cpi);
 
-void vp10_set_quantizer(struct VP10Common *cm, int q);
+void av1_set_quantizer(struct AV1Common *cm, int q);
 
-int vp10_quantizer_to_qindex(int quantizer);
+int av1_quantizer_to_qindex(int quantizer);
 
-int vp10_qindex_to_quantizer(int qindex);
+int av1_qindex_to_quantizer(int qindex);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_QUANTIZE_H_
+#endif  // AV1_ENCODER_QUANTIZE_H_
diff --git a/av1/encoder/ratectrl.c b/av1/encoder/ratectrl.c
index bcb5ab8c741f2802af2f9c4b7a33209c23c74406..79dc74cbc32573a4491c7d079680490847d15bab 100644
--- a/av1/encoder/ratectrl.c
+++ b/av1/encoder/ratectrl.c
@@ -46,7 +46,7 @@
 
 #define FRAME_OVERHEAD_BITS 200
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define ASSIGN_MINQ_TABLE(bit_depth, name)                   \
   do {                                                       \
     switch (bit_depth) {                                     \
@@ -76,7 +76,7 @@ static int arfgf_high_motion_minq_8[QINDEX_RANGE];
 static int inter_minq_8[QINDEX_RANGE];
 static int rtc_minq_8[QINDEX_RANGE];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int kf_low_motion_minq_10[QINDEX_RANGE];
 static int kf_high_motion_minq_10[QINDEX_RANGE];
 static int arfgf_low_motion_minq_10[QINDEX_RANGE];
@@ -110,7 +110,7 @@ static int get_minq_index(double maxq, double x3, double x2, double x1,
   if (minqtarget <= 2.0) return 0;
 
   for (i = 0; i < QINDEX_RANGE; i++) {
-    if (minqtarget <= vp10_convert_qindex_to_q(i, bit_depth)) return i;
+    if (minqtarget <= av1_convert_qindex_to_q(i, bit_depth)) return i;
   }
 
   return QINDEX_RANGE - 1;
@@ -121,7 +121,7 @@ static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
                            aom_bit_depth_t bit_depth) {
   int i;
   for (i = 0; i < QINDEX_RANGE; i++) {
-    const double maxq = vp10_convert_qindex_to_q(i, bit_depth);
+    const double maxq = av1_convert_qindex_to_q(i, bit_depth);
     kf_low_m[i] = get_minq_index(maxq, 0.000001, -0.0004, 0.150, bit_depth);
     kf_high_m[i] = get_minq_index(maxq, 0.0000021, -0.00125, 0.55, bit_depth);
     arfgf_low[i] = get_minq_index(maxq, 0.0000015, -0.0009, 0.30, bit_depth);
@@ -131,11 +131,11 @@ static void init_minq_luts(int *kf_low_m, int *kf_high_m, int *arfgf_low,
   }
 }
 
-void vp10_rc_init_minq_luts(void) {
+void av1_rc_init_minq_luts(void) {
   init_minq_luts(kf_low_motion_minq_8, kf_high_motion_minq_8,
                  arfgf_low_motion_minq_8, arfgf_high_motion_minq_8,
                  inter_minq_8, rtc_minq_8, VPX_BITS_8);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   init_minq_luts(kf_low_motion_minq_10, kf_high_motion_minq_10,
                  arfgf_low_motion_minq_10, arfgf_high_motion_minq_10,
                  inter_minq_10, rtc_minq_10, VPX_BITS_10);
@@ -148,25 +148,25 @@ void vp10_rc_init_minq_luts(void) {
 // These functions use formulaic calculations to make playing with the
 // quantizer tables easier. If necessary they can be replaced by lookup
 // tables if and when things settle down in the experimental bitstream
-double vp10_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth) {
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth) {
 // Convert the index to a real Q value (scaled down to match old Q values)
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
-    case VPX_BITS_10: return vp10_ac_quant(qindex, 0, bit_depth) / 16.0;
-    case VPX_BITS_12: return vp10_ac_quant(qindex, 0, bit_depth) / 64.0;
+    case VPX_BITS_8: return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
+    case VPX_BITS_10: return av1_ac_quant(qindex, 0, bit_depth) / 16.0;
+    case VPX_BITS_12: return av1_ac_quant(qindex, 0, bit_depth) / 64.0;
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1.0;
   }
 #else
-  return vp10_ac_quant(qindex, 0, bit_depth) / 4.0;
+  return av1_ac_quant(qindex, 0, bit_depth) / 4.0;
 #endif
 }
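
The divisors 4.0, 16.0 and 64.0 in av1_convert_qindex_to_q() above put all bit depths on one Q scale, assuming (not shown in this hunk) that the 10- and 12-bit quantizer tables are roughly 4x and 16x their 8-bit counterparts. A tiny standalone check of that relationship with made-up quantizer values:

#include <stdio.h>

/* Assumption for this sketch only: for a given qindex the 10-bit AC quantizer
 * is ~4x the 8-bit one and the 12-bit value ~16x, reflecting the extra
 * fractional precision of the deeper bit depths. */
int main(void) {
  const int ac_quant_8 = 48;             /* made-up 8-bit quantizer step */
  const int ac_quant_10 = ac_quant_8 * 4;
  const int ac_quant_12 = ac_quant_8 * 16;

  /* Mirrors the divisors used in av1_convert_qindex_to_q(). */
  printf("8-bit  Q = %.2f\n", ac_quant_8 / 4.0);
  printf("10-bit Q = %.2f\n", ac_quant_10 / 16.0);
  printf("12-bit Q = %.2f\n", ac_quant_12 / 64.0);
  return 0;
}
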
 
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
                         double correction_factor, aom_bit_depth_t bit_depth) {
-  const double q = vp10_convert_qindex_to_q(qindex, bit_depth);
+  const double q = av1_convert_qindex_to_q(qindex, bit_depth);
   int enumerator = frame_type == KEY_FRAME ? 2700000 : 1800000;
 
   assert(correction_factor <= MAX_BPB_FACTOR &&
@@ -177,18 +177,18 @@ int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
   return (int)(enumerator * correction_factor / q);
 }
 
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
+int av1_estimate_bits_at_q(FRAME_TYPE frame_type, int q, int mbs,
                             double correction_factor,
                             aom_bit_depth_t bit_depth) {
   const int bpm =
-      (int)(vp10_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
+      (int)(av1_rc_bits_per_mb(frame_type, q, correction_factor, bit_depth));
   return VPXMAX(FRAME_OVERHEAD_BITS,
                 (int)((uint64_t)bpm * mbs) >> BPER_MB_NORMBITS);
 }
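
Putting the two functions above together: the per-macroblock budget is an enumerator (2700000 for key frames, 1800000 otherwise) scaled by the rate correction factor and divided by Q, and the frame estimate multiplies that by the macroblock count and shifts off the fixed-point normalization. A standalone sketch with concrete numbers that ignores the FRAME_OVERHEAD_BITS floor; BPER_MB_NORMBITS is not shown in this diff, so a value of 9 is assumed here purely to make the arithmetic concrete:

#include <stdio.h>

#define TOY_BPER_MB_NORMBITS 9 /* assumed, not taken from the diff */

int main(void) {
  const double q = 40.0;          /* 'real' Q from av1_convert_qindex_to_q */
  const double correction = 1.0;  /* neutral rate correction factor */
  const int mbs = 3600;           /* e.g. a 1280x720 frame in 16x16 MBs */

  /* Inter-frame enumerator, scaled by the correction factor, divided by Q. */
  const int bits_per_mb = (int)(1800000 * correction / q);
  const int frame_bits =
      (int)(((long long)bits_per_mb * mbs) >> TOY_BPER_MB_NORMBITS);

  printf("bits per MB (normalized): %d\n", bits_per_mb);
  printf("estimated frame size:     %d bits\n", frame_bits);
  return 0;
}
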
 
-int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_pframe_target_size(const AV1_COMP *const cpi, int target) {
   const RATE_CONTROL *rc = &cpi->rc;
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   const int min_frame_target =
       VPXMAX(rc->min_frame_bandwidth, rc->avg_frame_bandwidth >> 5);
   if (target < min_frame_target) target = min_frame_target;
@@ -209,9 +209,9 @@ int vp10_rc_clamp_pframe_target_size(const VP10_COMP *const cpi, int target) {
   return target;
 }
 
-int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
+int av1_rc_clamp_iframe_target_size(const AV1_COMP *const cpi, int target) {
   const RATE_CONTROL *rc = &cpi->rc;
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   if (oxcf->rc_max_intra_bitrate_pct) {
     const int max_rate =
         rc->avg_frame_bandwidth * oxcf->rc_max_intra_bitrate_pct / 100;
@@ -222,8 +222,8 @@ int vp10_rc_clamp_iframe_target_size(const VP10_COMP *const cpi, int target) {
 }
 
 // Update the buffer level: leaky bucket model.
-static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
-  const VP10_COMMON *const cm = &cpi->common;
+static void update_buffer_level(AV1_COMP *cpi, int encoded_frame_size) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
   // Non-viewable frames are a special case and are treated as pure overhead.
@@ -238,7 +238,7 @@ static void update_buffer_level(VP10_COMP *cpi, int encoded_frame_size) {
   rc->buffer_level = rc->bits_off_target;
 }
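
update_buffer_level() above is the leaky-bucket view of CBR rate control: the buffer is credited with what the channel delivers per frame and debited by what each encoded frame actually spent, and the resulting level is what later drives the active-worst-quality adjustment further down in this file. A standalone toy of that bookkeeping; the numbers and the overflow clamp are invented for the sketch, and the real code also treats non-viewable frames as pure overhead, as its comment notes:

#include <stdio.h>

int main(void) {
  const long long per_frame_budget = 100000; /* channel refill per frame, bits */
  const long long buffer_size = 6 * per_frame_budget;
  long long buffer_level = buffer_size / 2;  /* start half full */

  const long long frame_sizes[6] = { 90000, 140000, 80000, 200000, 60000, 90000 };

  for (int i = 0; i < 6; ++i) {
    /* Leaky bucket: add what the channel delivers, subtract what we spent. */
    buffer_level += per_frame_budget - frame_sizes[i];
    if (buffer_level > buffer_size) buffer_level = buffer_size; /* bucket full */
    printf("frame %d: size %6lld -> buffer level %7lld\n", i, frame_sizes[i],
           buffer_level);
  }
  return 0;
}
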
 
-int vp10_rc_get_default_min_gf_interval(int width, int height,
+int av1_rc_get_default_min_gf_interval(int width, int height,
                                         double framerate) {
   // Assume we do not need any constraint lower than 4K 20 fps
   static const double factor_safe = 3840 * 2160 * 20.0;
@@ -257,13 +257,13 @@ int vp10_rc_get_default_min_gf_interval(int width, int height,
   // 4K60: 12
 }
 
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
+int av1_rc_get_default_max_gf_interval(double framerate, int min_gf_interval) {
   int interval = VPXMIN(MAX_GF_INTERVAL, (int)(framerate * 0.75));
   interval += (interval & 0x01);  // Round to even value
   return VPXMAX(interval, min_gf_interval);
 }
 
-void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
+void av1_rc_init(const AV1EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
   int i;
 
   if (pass == 0 && oxcf->rc_mode == VPX_CBR) {
@@ -303,7 +303,7 @@ void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
   rc->ni_frames = 0;
 
   rc->tot_q = 0.0;
-  rc->avg_q = vp10_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
+  rc->avg_q = av1_convert_qindex_to_q(oxcf->worst_allowed_q, oxcf->bit_depth);
 
   for (i = 0; i < RATE_FACTOR_LEVELS; ++i) {
     rc->rate_correction_factors[i] = 1.0;
@@ -312,16 +312,16 @@ void vp10_rc_init(const VP10EncoderConfig *oxcf, int pass, RATE_CONTROL *rc) {
   rc->min_gf_interval = oxcf->min_gf_interval;
   rc->max_gf_interval = oxcf->max_gf_interval;
   if (rc->min_gf_interval == 0)
-    rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+    rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
         oxcf->width, oxcf->height, oxcf->init_framerate);
   if (rc->max_gf_interval == 0)
-    rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+    rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
         oxcf->init_framerate, rc->min_gf_interval);
   rc->baseline_gf_interval = (rc->min_gf_interval + rc->max_gf_interval) / 2;
 }
 
-int vp10_rc_drop_frame(VP10_COMP *cpi) {
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+int av1_rc_drop_frame(AV1_COMP *cpi) {
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   RATE_CONTROL *const rc = &cpi->rc;
 
   if (!oxcf->drop_frames_water_mark) {
@@ -356,7 +356,7 @@ int vp10_rc_drop_frame(VP10_COMP *cpi) {
   }
 }
 
-static double get_rate_correction_factor(const VP10_COMP *cpi) {
+static double get_rate_correction_factor(const AV1_COMP *cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
   double rcf;
 
@@ -378,7 +378,7 @@ static double get_rate_correction_factor(const VP10_COMP *cpi) {
   return fclamp(rcf, MIN_BPB_FACTOR, MAX_BPB_FACTOR);
 }
 
-static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
+static void set_rate_correction_factor(AV1_COMP *cpi, double factor) {
   RATE_CONTROL *const rc = &cpi->rc;
 
   // Normalize RCF to account for the size-dependent scaling factor.
@@ -402,8 +402,8 @@ static void set_rate_correction_factor(VP10_COMP *cpi, double factor) {
   }
 }
 
-void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_update_rate_correction_factors(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   int correction_factor = 100;
   double rate_correction_factor = get_rate_correction_factor(cpi);
   double adjustment_limit;
@@ -421,10 +421,10 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
   // Stay in double to avoid int overflow when values are large
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cpi->common.seg.enabled) {
     projected_size_based_on_q =
-        vp10_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
+        av1_cyclic_refresh_estimate_bits_at_q(cpi, rate_correction_factor);
   } else {
     projected_size_based_on_q =
-        vp10_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
+        av1_estimate_bits_at_q(cpi->common.frame_type, cm->base_qindex,
                                 cm->MBs, rate_correction_factor, cm->bit_depth);
   }
   // Work out a size correction factor.
@@ -469,9 +469,9 @@ void vp10_rc_update_rate_correction_factors(VP10_COMP *cpi) {
   set_rate_correction_factor(cpi, rate_correction_factor);
 }
 
-int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
+int av1_rc_regulate_q(const AV1_COMP *cpi, int target_bits_per_frame,
                        int active_best_quality, int active_worst_quality) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   int q = active_worst_quality;
   int last_error = INT_MAX;
   int i, target_bits_per_mb, bits_per_mb_at_this_q;
@@ -487,9 +487,9 @@ int vp10_rc_regulate_q(const VP10_COMP *cpi, int target_bits_per_frame,
   do {
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
       bits_per_mb_at_this_q =
-          (int)vp10_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
+          (int)av1_cyclic_refresh_rc_bits_per_mb(cpi, i, correction_factor);
     } else {
-      bits_per_mb_at_this_q = (int)vp10_rc_bits_per_mb(
+      bits_per_mb_at_this_q = (int)av1_rc_bits_per_mb(
           cm->frame_type, i, correction_factor, cm->bit_depth);
     }
 
@@ -551,7 +551,7 @@ static int get_gf_active_quality(const RATE_CONTROL *const rc, int q,
                             arfgf_low_motion_minq, arfgf_high_motion_minq);
 }
 
-static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_vbr(const AV1_COMP *cpi) {
   const RATE_CONTROL *const rc = &cpi->rc;
   const unsigned int curr_frame = cpi->common.current_video_frame;
   int active_worst_quality;
@@ -573,13 +573,13 @@ static int calc_active_worst_quality_one_pass_vbr(const VP10_COMP *cpi) {
 }
 
 // Adjust active_worst_quality level based on buffer level.
-static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_active_worst_quality_one_pass_cbr(const AV1_COMP *cpi) {
   // Adjust active_worst_quality: If buffer is above the optimal/target level,
   // bring active_worst_quality down depending on fullness of buffer.
   // If buffer is below the optimal level, let the active_worst_quality go from
   // ambient Q (at buffer = optimal level) to worst_quality level
   // (at buffer = critical level).
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *rc = &cpi->rc;
   // Buffer level below which we push active_worst to worst_quality.
   int64_t critical_level = rc->optimal_buffer_level >> 3;
@@ -628,10 +628,10 @@ static int calc_active_worst_quality_one_pass_cbr(const VP10_COMP *cpi) {
   return active_worst_quality;
 }
 
-static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_cbr(const AV1_COMP *cpi,
                                              int *bottom_index,
                                              int *top_index) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
   int active_best_quality;
   int active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
@@ -646,8 +646,8 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
     // based on the ambient Q to reduce the risk of popping.
     if (rc->this_key_frame_forced) {
       int qindex = rc->last_boosted_qindex;
-      double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(
+      double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = av1_compute_qdelta(
           rc, last_boosted_q, (last_boosted_q * 0.75), cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else if (cm->current_video_frame > 0) {
@@ -665,9 +665,9 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
-      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
       active_best_quality +=
-          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+          av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -711,7 +711,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
       !(cm->current_video_frame == 0)) {
     int qdelta = 0;
     aom_clear_system_state();
-    qdelta = vp10_compute_qdelta_by_rate(
+    qdelta = av1_compute_qdelta_by_rate(
         &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
     *top_index = active_worst_quality + qdelta;
     *top_index = (*top_index > *bottom_index) ? *top_index : *bottom_index;
@@ -722,7 +722,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
   if (cm->frame_type == KEY_FRAME && rc->this_key_frame_forced) {
     q = rc->last_boosted_qindex;
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+    q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
                            active_worst_quality);
     if (q > *top_index) {
       // Special case when we are targeting the max allowed rate
@@ -740,7 +740,7 @@ static int rc_pick_q_and_bounds_one_pass_cbr(const VP10_COMP *cpi,
 }
 
 static int get_active_cq_level(const RATE_CONTROL *rc,
-                               const VP10EncoderConfig *const oxcf) {
+                               const AV1EncoderConfig *const oxcf) {
   static const double cq_adjust_threshold = 0.1;
   int active_cq_level = oxcf->cq_level;
   if (oxcf->rc_mode == VPX_CQ && rc->total_target_bits > 0) {
@@ -752,12 +752,12 @@ static int get_active_cq_level(const RATE_CONTROL *rc,
   return active_cq_level;
 }
 
-static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_one_pass_vbr(const AV1_COMP *cpi,
                                              int *bottom_index,
                                              int *top_index) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const int cq_level = get_active_cq_level(rc, oxcf);
   int active_best_quality;
   int active_worst_quality = calc_active_worst_quality_one_pass_vbr(cpi);
@@ -768,13 +768,13 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
   if (frame_is_intra_only(cm)) {
     if (oxcf->rc_mode == VPX_Q) {
       int qindex = cq_level;
-      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
+      double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = av1_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else if (rc->this_key_frame_forced) {
       int qindex = rc->last_boosted_qindex;
-      double last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-      int delta_qindex = vp10_compute_qdelta(
+      double last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+      int delta_qindex = av1_compute_qdelta(
           rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else {
@@ -792,9 +792,9 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
-      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
       active_best_quality +=
-          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+          av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -818,12 +818,12 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
 
     } else if (oxcf->rc_mode == VPX_Q) {
       int qindex = cq_level;
-      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
       int delta_qindex;
       if (cpi->refresh_alt_ref_frame)
-        delta_qindex = vp10_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(rc, q, q * 0.40, cm->bit_depth);
       else
-        delta_qindex = vp10_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(rc, q, q * 0.50, cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
     } else {
       active_best_quality = get_gf_active_quality(rc, q, cm->bit_depth);
@@ -831,10 +831,10 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
   } else {
     if (oxcf->rc_mode == VPX_Q) {
       int qindex = cq_level;
-      double q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
       double delta_rate[FIXED_GF_INTERVAL] = { 0.50, 1.0, 0.85, 1.0,
                                                0.70, 1.0, 0.85, 1.0 };
-      int delta_qindex = vp10_compute_qdelta(
+      int delta_qindex = av1_compute_qdelta(
           rc, q, q * delta_rate[cm->current_video_frame % FIXED_GF_INTERVAL],
           cm->bit_depth);
       active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
@@ -869,11 +869,11 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
     // Limit Q range for the adaptive loop.
     if (cm->frame_type == KEY_FRAME && !rc->this_key_frame_forced &&
         !(cm->current_video_frame == 0)) {
-      qdelta = vp10_compute_qdelta_by_rate(
+      qdelta = av1_compute_qdelta_by_rate(
           &cpi->rc, cm->frame_type, active_worst_quality, 2.0, cm->bit_depth);
     } else if (!rc->is_src_frame_alt_ref &&
                (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
-      qdelta = vp10_compute_qdelta_by_rate(
+      qdelta = av1_compute_qdelta_by_rate(
           &cpi->rc, cm->frame_type, active_worst_quality, 1.75, cm->bit_depth);
     }
     *top_index = active_worst_quality + qdelta;
@@ -887,7 +887,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
   } else if ((cm->frame_type == KEY_FRAME) && rc->this_key_frame_forced) {
     q = rc->last_boosted_qindex;
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+    q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
                            active_worst_quality);
     if (q > *top_index) {
       // Special case when we are targeting the max allowed rate
@@ -905,7 +905,7 @@ static int rc_pick_q_and_bounds_one_pass_vbr(const VP10_COMP *cpi,
   return q;
 }
 
-int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
+int av1_frame_type_qdelta(const AV1_COMP *cpi, int rf_level, int q) {
   static const double rate_factor_deltas[RATE_FACTOR_LEVELS] = {
     1.00,  // INTER_NORMAL
     1.00,  // INTER_HIGH
@@ -916,19 +916,19 @@ int vp10_frame_type_qdelta(const VP10_COMP *cpi, int rf_level, int q) {
   static const FRAME_TYPE frame_type[RATE_FACTOR_LEVELS] = {
     INTER_FRAME, INTER_FRAME, INTER_FRAME, INTER_FRAME, KEY_FRAME
   };
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   int qdelta =
-      vp10_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
+      av1_compute_qdelta_by_rate(&cpi->rc, frame_type[rf_level], q,
                                   rate_factor_deltas[rf_level], cm->bit_depth);
   return qdelta;
 }
 
 #define STATIC_MOTION_THRESH 95
-static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
+static int rc_pick_q_and_bounds_two_pass(const AV1_COMP *cpi,
                                          int *bottom_index, int *top_index) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const RATE_CONTROL *const rc = &cpi->rc;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const GF_GROUP *gf_group = &cpi->twopass.gf_group;
   const int cq_level = get_active_cq_level(rc, oxcf);
   int active_best_quality;
@@ -949,15 +949,15 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
       if (cpi->twopass.last_kfgroup_zeromotion_pct >= STATIC_MOTION_THRESH) {
         qindex = VPXMIN(rc->last_kf_qindex, rc->last_boosted_qindex);
         active_best_quality = qindex;
-        last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-        delta_qindex = vp10_compute_qdelta(
+        last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(
             rc, last_boosted_q, last_boosted_q * 1.25, cm->bit_depth);
         active_worst_quality =
             VPXMIN(qindex + delta_qindex, active_worst_quality);
       } else {
         qindex = rc->last_boosted_qindex;
-        last_boosted_q = vp10_convert_qindex_to_q(qindex, cm->bit_depth);
-        delta_qindex = vp10_compute_qdelta(
+        last_boosted_q = av1_convert_qindex_to_q(qindex, cm->bit_depth);
+        delta_qindex = av1_compute_qdelta(
             rc, last_boosted_q, last_boosted_q * 0.75, cm->bit_depth);
         active_best_quality = VPXMAX(qindex + delta_qindex, rc->best_quality);
       }
@@ -979,9 +979,9 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
 
       // Convert the adjustment factor to a qindex delta
       // on active_best_quality.
-      q_val = vp10_convert_qindex_to_q(active_best_quality, cm->bit_depth);
+      q_val = av1_convert_qindex_to_q(active_best_quality, cm->bit_depth);
       active_best_quality +=
-          vp10_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
+          av1_compute_qdelta(rc, q_val, q_val * q_adj_factor, cm->bit_depth);
     }
   } else if (!rc->is_src_frame_alt_ref &&
              (cpi->refresh_golden_frame || cpi->refresh_alt_ref_frame)) {
@@ -1053,7 +1053,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
   // Q restrictions for static forced key frames are dealt with elsewhere.
   if (!(frame_is_intra_only(cm)) || !rc->this_key_frame_forced ||
       (cpi->twopass.last_kfgroup_zeromotion_pct < STATIC_MOTION_THRESH)) {
-    int qdelta = vp10_frame_type_qdelta(
+    int qdelta = av1_frame_type_qdelta(
         cpi, gf_group->rf_level[gf_group->index], active_worst_quality);
     active_worst_quality =
         VPXMAX(active_worst_quality + qdelta, active_best_quality);
@@ -1062,7 +1062,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
 
   // Modify active_best_quality for downscaled normal frames.
   if (rc->frame_size_selector != UNSCALED && !frame_is_kf_gf_arf(cpi)) {
-    int qdelta = vp10_compute_qdelta_by_rate(
+    int qdelta = av1_compute_qdelta_by_rate(
         rc, cm->frame_type, active_best_quality, 2.0, cm->bit_depth);
     active_best_quality =
         VPXMAX(active_best_quality + qdelta, rc->best_quality);
@@ -1084,7 +1084,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
       q = rc->last_boosted_qindex;
     }
   } else {
-    q = vp10_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
+    q = av1_rc_regulate_q(cpi, rc->this_frame_target, active_best_quality,
                            active_worst_quality);
     if (q > active_worst_quality) {
       // Special case when we are targeting the max allowed rate.
@@ -1106,7 +1106,7 @@ static int rc_pick_q_and_bounds_two_pass(const VP10_COMP *cpi,
   return q;
 }
 
-int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
+int av1_rc_pick_q_and_bounds(const AV1_COMP *cpi, int *bottom_index,
                               int *top_index) {
   int q;
   if (cpi->oxcf.pass == 0) {
@@ -1121,7 +1121,7 @@ int vp10_rc_pick_q_and_bounds(const VP10_COMP *cpi, int *bottom_index,
   return q;
 }
 
-void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
+void av1_rc_compute_frame_size_bounds(const AV1_COMP *cpi, int frame_target,
                                        int *frame_under_shoot_limit,
                                        int *frame_over_shoot_limit) {
   if (cpi->oxcf.rc_mode == VPX_Q) {
@@ -1137,8 +1137,8 @@ void vp10_rc_compute_frame_size_bounds(const VP10_COMP *cpi, int frame_target,
   }
 }
 
-void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
-  const VP10_COMMON *const cm = &cpi->common;
+void av1_rc_set_frame_target(AV1_COMP *cpi, int target) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
 
   rc->this_frame_target = target;
@@ -1154,7 +1154,7 @@ void vp10_rc_set_frame_target(VP10_COMP *cpi, int target) {
       ((int64_t)rc->this_frame_target * 64 * 64) / (cm->width * cm->height);
 }
 
-static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
+static void update_alt_ref_frame_stats(AV1_COMP *cpi) {
   // This frame refreshes; later frames don't unless the user requests it.
   RATE_CONTROL *const rc = &cpi->rc;
   rc->frames_since_golden = 0;
@@ -1166,7 +1166,7 @@ static void update_alt_ref_frame_stats(VP10_COMP *cpi) {
   rc->source_alt_ref_active = 1;
 }
 
-static void update_golden_frame_stats(VP10_COMP *cpi) {
+static void update_golden_frame_stats(AV1_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
 
   // Update the Golden frame usage counts.
@@ -1195,21 +1195,21 @@ static void update_golden_frame_stats(VP10_COMP *cpi) {
   }
 }
 
-void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
-  const VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_postencode_update(AV1_COMP *cpi, uint64_t bytes_used) {
+  const AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   RATE_CONTROL *const rc = &cpi->rc;
   const int qindex = cm->base_qindex;
 
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled) {
-    vp10_cyclic_refresh_postencode(cpi);
+    av1_cyclic_refresh_postencode(cpi);
   }
 
   // Update rate control heuristics
   rc->projected_frame_size = (int)(bytes_used << 3);
 
   // Post encode loop adjustment of Q prediction.
-  vp10_rc_update_rate_correction_factors(cpi);
+  av1_rc_update_rate_correction_factors(cpi);
 
   // Keep a record of last Q and ambient average Q.
   if (cm->frame_type == KEY_FRAME) {
@@ -1223,7 +1223,7 @@ void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
       rc->avg_frame_qindex[INTER_FRAME] =
           ROUND_POWER_OF_TWO(3 * rc->avg_frame_qindex[INTER_FRAME] + qindex, 2);
       rc->ni_frames++;
-      rc->tot_q += vp10_convert_qindex_to_q(qindex, cm->bit_depth);
+      rc->tot_q += av1_convert_qindex_to_q(qindex, cm->bit_depth);
       rc->avg_q = rc->tot_q / rc->ni_frames;
       // Calculate the average Q for normal inter frames (not key or GFU
       // frames).
@@ -1288,7 +1288,7 @@ void vp10_rc_postencode_update(VP10_COMP *cpi, uint64_t bytes_used) {
   }
 }
 
-void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
+void av1_rc_postencode_update_drop_frame(AV1_COMP *cpi) {
   // Update buffer level with zero size, update frame counters, and return.
   update_buffer_level(cpi, 0);
   cpi->rc.frames_since_key++;
@@ -1300,7 +1300,7 @@ void vp10_rc_postencode_update_drop_frame(VP10_COMP *cpi) {
 // Use this macro to turn on/off use of alt-refs in one-pass mode.
 #define USE_ALTREF_FOR_ONE_PASS 1
 
-static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_pframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
   static const int af_ratio = 10;
   const RATE_CONTROL *const rc = &cpi->rc;
   int target;
@@ -1315,18 +1315,18 @@ static int calc_pframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
 #else
   target = rc->avg_frame_bandwidth;
 #endif
-  return vp10_rc_clamp_pframe_target_size(cpi, target);
+  return av1_rc_clamp_pframe_target_size(cpi, target);
 }
 
-static int calc_iframe_target_size_one_pass_vbr(const VP10_COMP *const cpi) {
+static int calc_iframe_target_size_one_pass_vbr(const AV1_COMP *const cpi) {
   static const int kf_ratio = 25;
   const RATE_CONTROL *rc = &cpi->rc;
   const int target = rc->avg_frame_bandwidth * kf_ratio;
-  return vp10_rc_clamp_iframe_target_size(cpi, target);
+  return av1_rc_clamp_iframe_target_size(cpi, target);
 }
 
-void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_vbr_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int target;
   // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1360,11 +1360,11 @@ void vp10_rc_get_one_pass_vbr_params(VP10_COMP *cpi) {
     target = calc_iframe_target_size_one_pass_vbr(cpi);
   else
     target = calc_pframe_target_size_one_pass_vbr(cpi);
-  vp10_rc_set_frame_target(cpi, target);
+  av1_rc_set_frame_target(cpi, target);
 }
 
-static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
-  const VP10EncoderConfig *oxcf = &cpi->oxcf;
+static int calc_pframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
+  const AV1EncoderConfig *oxcf = &cpi->oxcf;
   const RATE_CONTROL *rc = &cpi->rc;
   const int64_t diff = rc->optimal_buffer_level - rc->buffer_level;
   const int64_t one_pct_bits = 1 + rc->optimal_buffer_level / 100;
@@ -1402,7 +1402,7 @@ static int calc_pframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
   return VPXMAX(min_frame_target, target);
 }
 
-static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
+static int calc_iframe_target_size_one_pass_cbr(const AV1_COMP *cpi) {
   const RATE_CONTROL *rc = &cpi->rc;
   int target;
   if (cpi->common.current_video_frame == 0) {
@@ -1419,11 +1419,11 @@ static int calc_iframe_target_size_one_pass_cbr(const VP10_COMP *cpi) {
     }
     target = ((16 + kf_boost) * rc->avg_frame_bandwidth) >> 4;
   }
-  return vp10_rc_clamp_iframe_target_size(cpi, target);
+  return av1_rc_clamp_iframe_target_size(cpi, target);
 }
 
-void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_rc_get_one_pass_cbr_params(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int target;
   // TODO(yaowu): replace the "auto_key && 0" below with proper decision logic.
@@ -1440,7 +1440,7 @@ void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
   }
   if (rc->frames_till_gf_update_due == 0) {
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
-      vp10_cyclic_refresh_set_golden_update(cpi);
+      av1_cyclic_refresh_set_golden_update(cpi);
     else
       rc->baseline_gf_interval =
           (rc->min_gf_interval + rc->max_gf_interval) / 2;
@@ -1455,21 +1455,21 @@ void vp10_rc_get_one_pass_cbr_params(VP10_COMP *cpi) {
   // Any update/change of global cyclic refresh parameters (amount/delta-qp)
   // should be done here, before the frame qp is selected.
   if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ)
-    vp10_cyclic_refresh_update_parameters(cpi);
+    av1_cyclic_refresh_update_parameters(cpi);
 
   if (cm->frame_type == KEY_FRAME)
     target = calc_iframe_target_size_one_pass_cbr(cpi);
   else
     target = calc_pframe_target_size_one_pass_cbr(cpi);
 
-  vp10_rc_set_frame_target(cpi, target);
+  av1_rc_set_frame_target(cpi, target);
   if (cpi->oxcf.resize_mode == RESIZE_DYNAMIC)
-    cpi->resize_pending = vp10_resize_one_pass_cbr(cpi);
+    cpi->resize_pending = av1_resize_one_pass_cbr(cpi);
   else
     cpi->resize_pending = 0;
 }
 
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
                         aom_bit_depth_t bit_depth) {
   int start_index = rc->worst_quality;
   int target_index = rc->worst_quality;
@@ -1478,19 +1478,19 @@ int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
   // Convert the average q value to an index.
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
     start_index = i;
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= qstart) break;
+    if (av1_convert_qindex_to_q(i, bit_depth) >= qstart) break;
   }
 
   // Convert the q target to an index
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
     target_index = i;
-    if (vp10_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
+    if (av1_convert_qindex_to_q(i, bit_depth) >= qtarget) break;
   }
 
   return target_index - start_index;
 }
 
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
                                 int qindex, double rate_target_ratio,
                                 aom_bit_depth_t bit_depth) {
   int target_index = rc->worst_quality;
@@ -1498,14 +1498,14 @@ int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
 
   // Look up the current projected bits per block for the base index
   const int base_bits_per_mb =
-      vp10_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
+      av1_rc_bits_per_mb(frame_type, qindex, 1.0, bit_depth);
 
   // Find the target bits per mb based on the base value and given ratio.
   const int target_bits_per_mb = (int)(rate_target_ratio * base_bits_per_mb);
 
   // Convert the q target to an index
   for (i = rc->best_quality; i < rc->worst_quality; ++i) {
-    if (vp10_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
+    if (av1_rc_bits_per_mb(frame_type, i, 1.0, bit_depth) <=
         target_bits_per_mb) {
       target_index = i;
       break;
@@ -1514,9 +1514,9 @@ int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
   return target_index - qindex;
 }
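As a hedged aside on the sign convention of these two helpers (the wrapper function below is invented for this sketch and the values are illustrative only): av1_compute_qdelta() converges on the qindex whose real q first reaches the target q, while av1_compute_qdelta_by_rate() converges on the qindex whose projected bits per MB first drop to the requested ratio of the base rate, so asking for a lower q or for a higher rate both yield a non-positive delta.

/* Sketch only: sign convention of the qdelta helpers. The wrapper is
 * hypothetical; exact deltas depend on the quantizer tables in use. */
static void qdelta_sign_sketch(const RATE_CONTROL *rc, const AV1_COMMON *cm,
                               int qindex) {
  const double q = av1_convert_qindex_to_q(qindex, cm->bit_depth);

  /* Targeting a quarter of the current q (higher quality) gives a delta <= 0,
   * as used when tightening active_best_quality above. */
  const int d_q = av1_compute_qdelta(rc, q, q * 0.25, cm->bit_depth);

  /* Targeting twice the projected bits per MB likewise maps to a lower
   * (better-quality) qindex, so this delta is also <= 0; a ratio below 1.0
   * would give a delta >= 0 instead. */
  const int d_rate = av1_compute_qdelta_by_rate(rc, cm->frame_type, qindex,
                                                2.0, cm->bit_depth);
  (void)d_q;
  (void)d_rate;
}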
 
-void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
+void av1_rc_set_gf_interval_range(const AV1_COMP *const cpi,
                                    RATE_CONTROL *const rc) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
 
   // Special case code for 1 pass fixed Q mode tests
   if ((oxcf->pass == 0) && (oxcf->rc_mode == VPX_Q)) {
@@ -1528,10 +1528,10 @@ void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
     rc->max_gf_interval = oxcf->max_gf_interval;
     rc->min_gf_interval = oxcf->min_gf_interval;
     if (rc->min_gf_interval == 0)
-      rc->min_gf_interval = vp10_rc_get_default_min_gf_interval(
+      rc->min_gf_interval = av1_rc_get_default_min_gf_interval(
           oxcf->width, oxcf->height, cpi->framerate);
     if (rc->max_gf_interval == 0)
-      rc->max_gf_interval = vp10_rc_get_default_max_gf_interval(
+      rc->max_gf_interval = av1_rc_get_default_max_gf_interval(
           cpi->framerate, rc->min_gf_interval);
 
     // Extended interval for genuinely static scenes
@@ -1550,9 +1550,9 @@ void vp10_rc_set_gf_interval_range(const VP10_COMP *const cpi,
   }
 }
 
-void vp10_rc_update_framerate(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+void av1_rc_update_framerate(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   RATE_CONTROL *const rc = &cpi->rc;
   int vbr_max_bits;
 
@@ -1576,12 +1576,12 @@ void vp10_rc_update_framerate(VP10_COMP *cpi) {
   rc->max_frame_bandwidth =
       VPXMAX(VPXMAX((cm->MBs * MAX_MB_RATE), MAXRATE_1080P), vbr_max_bits);
 
-  vp10_rc_set_gf_interval_range(cpi, rc);
+  av1_rc_set_gf_interval_range(cpi, rc);
 }
 
 #define VBR_PCT_ADJUSTMENT_LIMIT 50
 // For VBR...adjustment to the frame target based on error from previous frames
-static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
+static void vbr_rate_correction(AV1_COMP *cpi, int *this_frame_target) {
   RATE_CONTROL *const rc = &cpi->rc;
   int64_t vbr_bits_off_target = rc->vbr_bits_off_target;
   int max_delta;
@@ -1623,20 +1623,20 @@ static void vbr_rate_correction(VP10_COMP *cpi, int *this_frame_target) {
   }
 }
 
-void vp10_set_target_rate(VP10_COMP *cpi) {
+void av1_set_target_rate(AV1_COMP *cpi) {
   RATE_CONTROL *const rc = &cpi->rc;
   int target_rate = rc->base_frame_target;
 
   // Correction to rate target based on prior over or under shoot.
   if (cpi->oxcf.rc_mode == VPX_VBR || cpi->oxcf.rc_mode == VPX_CQ)
     vbr_rate_correction(cpi, &target_rate);
-  vp10_rc_set_frame_target(cpi, target_rate);
+  av1_rc_set_frame_target(cpi, target_rate);
 }
 
 // Check if we should resize, based on average QP from past x frames.
 // Only allow for resize at most one scale down for now, scaling factor is 2.
-int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
-  const VP10_COMMON *const cm = &cpi->common;
+int av1_resize_one_pass_cbr(AV1_COMP *cpi) {
+  const AV1_COMMON *const cm = &cpi->common;
   RATE_CONTROL *const rc = &cpi->rc;
   int resize_now = 0;
   cpi->resize_scale_num = 1;
@@ -1695,14 +1695,14 @@ int vp10_resize_one_pass_cbr(VP10_COMP *cpi) {
     rc->this_frame_target = calc_pframe_target_size_one_pass_cbr(cpi);
     // Reset cyclic refresh parameters.
     if (cpi->oxcf.aq_mode == CYCLIC_REFRESH_AQ && cm->seg.enabled)
-      vp10_cyclic_refresh_reset_resize(cpi);
+      av1_cyclic_refresh_reset_resize(cpi);
     // Get the projected qindex, based on the scaled target frame size (scaled
-    // so target_bits_per_mb in vp10_rc_regulate_q will be correct target).
+    // so target_bits_per_mb in av1_rc_regulate_q will be the correct target).
     target_bits_per_frame = (resize_now == 1)
                                 ? rc->this_frame_target * tot_scale_change
                                 : rc->this_frame_target / tot_scale_change;
     active_worst_quality = calc_active_worst_quality_one_pass_cbr(cpi);
-    qindex = vp10_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
+    qindex = av1_rc_regulate_q(cpi, target_bits_per_frame, rc->best_quality,
                                 active_worst_quality);
     // If resize is down, check if projected q index is close to worst_quality,
     // and if so, reduce the rate correction factor (since likely can afford
diff --git a/av1/encoder/ratectrl.h b/av1/encoder/ratectrl.h
index 410cebfbf4393d48979aee58c04cd8a2e7d3801f..38693125b864640fdd794c46190b148f681f5797 100644
--- a/av1/encoder/ratectrl.h
+++ b/av1/encoder/ratectrl.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_RATECTRL_H_
-#define VP10_ENCODER_RATECTRL_H_
+#ifndef AV1_ENCODER_RATECTRL_H_
+#define AV1_ENCODER_RATECTRL_H_
 
 #include "aom/aom_codec.h"
 #include "aom/aom_integer.h"
@@ -145,118 +145,118 @@ typedef struct {
   int rf_level_maxq[RATE_FACTOR_LEVELS];
 } RATE_CONTROL;
 
-struct VP10_COMP;
-struct VP10EncoderConfig;
+struct AV1_COMP;
+struct AV1EncoderConfig;
 
-void vp10_rc_init(const struct VP10EncoderConfig *oxcf, int pass,
+void av1_rc_init(const struct AV1EncoderConfig *oxcf, int pass,
                   RATE_CONTROL *rc);
 
-int vp10_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
+int av1_estimate_bits_at_q(FRAME_TYPE frame_kind, int q, int mbs,
                             double correction_factor,
                             aom_bit_depth_t bit_depth);
 
-double vp10_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth);
+double av1_convert_qindex_to_q(int qindex, aom_bit_depth_t bit_depth);
 
-void vp10_rc_init_minq_luts(void);
+void av1_rc_init_minq_luts(void);
 
-int vp10_rc_get_default_min_gf_interval(int width, int height,
+int av1_rc_get_default_min_gf_interval(int width, int height,
                                         double framerate);
-// Note vp10_rc_get_default_max_gf_interval() requires the min_gf_interval to
+// Note av1_rc_get_default_max_gf_interval() requires the min_gf_interval to
 // be passed in to ensure that the max_gf_interval returned is at least as big
 // as that.
-int vp10_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
+int av1_rc_get_default_max_gf_interval(double framerate, int min_frame_rate);
 
 // Generally at the high level, the following flow is expected
 // to be enforced for rate control:
 // First call per frame, one of:
-//   vp10_rc_get_one_pass_vbr_params()
-//   vp10_rc_get_one_pass_cbr_params()
-//   vp10_rc_get_first_pass_params()
-//   vp10_rc_get_second_pass_params()
+//   av1_rc_get_one_pass_vbr_params()
+//   av1_rc_get_one_pass_cbr_params()
+//   av1_rc_get_first_pass_params()
+//   av1_rc_get_second_pass_params()
 // depending on the usage to set the rate control encode parameters desired.
 //
 // Then, call encode_frame_to_data_rate() to perform the
 // actual encode. This function will in turn call encode_frame()
 // one or more times, followed by one of:
-//   vp10_rc_postencode_update()
-//   vp10_rc_postencode_update_drop_frame()
+//   av1_rc_postencode_update()
+//   av1_rc_postencode_update_drop_frame()
 //
 // The majority of rate control parameters are only expected
-// to be set in the vp10_rc_get_..._params() functions and
-// updated during the vp10_rc_postencode_update...() functions.
-// The only exceptions are vp10_rc_drop_frame() and
-// vp10_rc_update_rate_correction_factors() functions.
+// to be set in the av1_rc_get_..._params() functions and
+// updated during the av1_rc_postencode_update...() functions.
+// The only exceptions are av1_rc_drop_frame() and
+// av1_rc_update_rate_correction_factors() functions.
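To make the ordering above concrete, the sketch below shows how a single one-pass CBR frame might drive these entry points, using only functions declared in this header. It is a minimal illustration rather than the library's actual control flow: encode_one_frame() is a hypothetical stand-in for the internal encode_frame_to_data_rate() / encode_frame() path, and error handling is omitted.

/* Sketch only: expected per-frame rate-control ordering for one-pass CBR.
 * encode_one_frame() is a hypothetical placeholder for the internal
 * encode_frame_to_data_rate() -> encode_frame() path. */
static void encode_one_frame(struct AV1_COMP *cpi, uint64_t *bytes_used);

static void rc_one_pass_cbr_frame_sketch(struct AV1_COMP *cpi) {
  uint64_t bytes_used = 0;

  /* 1. Pick the frame target and pre-encode RC state. */
  av1_rc_get_one_pass_cbr_params(cpi);

  /* 2. Optionally drop the frame (1-pass CBR only); a dropped frame still
   *    updates the RC counters via the drop-frame variant. */
  if (av1_rc_drop_frame(cpi)) {
    av1_rc_postencode_update_drop_frame(cpi);
    return;
  }

  /* 3. Encode; internally this is where q and its bounds are chosen
   *    (av1_rc_pick_q_and_bounds) and correction factors are updated. */
  encode_one_frame(cpi, &bytes_used);

  /* 4. Feed the actual frame size back into the rate control state. */
  av1_rc_postencode_update(cpi, bytes_used);
}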
 
 // Functions to set parameters for encoding before the actual
 // encode_frame_to_data_rate() function.
-void vp10_rc_get_one_pass_vbr_params(struct VP10_COMP *cpi);
-void vp10_rc_get_one_pass_cbr_params(struct VP10_COMP *cpi);
+void av1_rc_get_one_pass_vbr_params(struct AV1_COMP *cpi);
+void av1_rc_get_one_pass_cbr_params(struct AV1_COMP *cpi);
 
 // Post encode update of the rate control parameters based
 // on bytes used
-void vp10_rc_postencode_update(struct VP10_COMP *cpi, uint64_t bytes_used);
+void av1_rc_postencode_update(struct AV1_COMP *cpi, uint64_t bytes_used);
 // Post encode update of the rate control parameters for dropped frames
-void vp10_rc_postencode_update_drop_frame(struct VP10_COMP *cpi);
+void av1_rc_postencode_update_drop_frame(struct AV1_COMP *cpi);
 
 // Updates rate correction factors
 // Changes only the rate correction factors in the rate control structure.
-void vp10_rc_update_rate_correction_factors(struct VP10_COMP *cpi);
+void av1_rc_update_rate_correction_factors(struct AV1_COMP *cpi);
 
 // Decide if we should drop this frame: For 1-pass CBR.
 // Changes only the decimation count in the rate control structure
-int vp10_rc_drop_frame(struct VP10_COMP *cpi);
+int av1_rc_drop_frame(struct AV1_COMP *cpi);
 
 // Computes frame size bounds.
-void vp10_rc_compute_frame_size_bounds(const struct VP10_COMP *cpi,
+void av1_rc_compute_frame_size_bounds(const struct AV1_COMP *cpi,
                                        int this_frame_target,
                                        int *frame_under_shoot_limit,
                                        int *frame_over_shoot_limit);
 
 // Picks q and q bounds given the target for bits
-int vp10_rc_pick_q_and_bounds(const struct VP10_COMP *cpi, int *bottom_index,
+int av1_rc_pick_q_and_bounds(const struct AV1_COMP *cpi, int *bottom_index,
                               int *top_index);
 
 // Estimates q to achieve a target bits per frame
-int vp10_rc_regulate_q(const struct VP10_COMP *cpi, int target_bits_per_frame,
+int av1_rc_regulate_q(const struct AV1_COMP *cpi, int target_bits_per_frame,
                        int active_best_quality, int active_worst_quality);
 
 // Estimates bits per mb for a given qindex and correction factor.
-int vp10_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
+int av1_rc_bits_per_mb(FRAME_TYPE frame_type, int qindex,
                         double correction_factor, aom_bit_depth_t bit_depth);
 
 // Clamping utilities for bitrate targets for iframes and pframes.
-int vp10_rc_clamp_iframe_target_size(const struct VP10_COMP *const cpi,
+int av1_rc_clamp_iframe_target_size(const struct AV1_COMP *const cpi,
                                      int target);
-int vp10_rc_clamp_pframe_target_size(const struct VP10_COMP *const cpi,
+int av1_rc_clamp_pframe_target_size(const struct AV1_COMP *const cpi,
                                      int target);
 // Utility to set frame_target into the RATE_CONTROL structure
-// This function is called only from the vp10_rc_get_..._params() functions.
-void vp10_rc_set_frame_target(struct VP10_COMP *cpi, int target);
+// This function is called only from the av1_rc_get_..._params() functions.
+void av1_rc_set_frame_target(struct AV1_COMP *cpi, int target);
 
 // Computes a q delta (in "q index" terms) to get from a starting q value
 // to a target q value
-int vp10_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
+int av1_compute_qdelta(const RATE_CONTROL *rc, double qstart, double qtarget,
                         aom_bit_depth_t bit_depth);
 
 // Computes a q delta (in "q index" terms) to get from a starting q value
 // to a value that should equate to the given rate ratio.
-int vp10_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
+int av1_compute_qdelta_by_rate(const RATE_CONTROL *rc, FRAME_TYPE frame_type,
                                 int qindex, double rate_target_ratio,
                                 aom_bit_depth_t bit_depth);
 
-int vp10_frame_type_qdelta(const struct VP10_COMP *cpi, int rf_level, int q);
+int av1_frame_type_qdelta(const struct AV1_COMP *cpi, int rf_level, int q);
 
-void vp10_rc_update_framerate(struct VP10_COMP *cpi);
+void av1_rc_update_framerate(struct AV1_COMP *cpi);
 
-void vp10_rc_set_gf_interval_range(const struct VP10_COMP *const cpi,
+void av1_rc_set_gf_interval_range(const struct AV1_COMP *const cpi,
                                    RATE_CONTROL *const rc);
 
-void vp10_set_target_rate(struct VP10_COMP *cpi);
+void av1_set_target_rate(struct AV1_COMP *cpi);
 
-int vp10_resize_one_pass_cbr(struct VP10_COMP *cpi);
+int av1_resize_one_pass_cbr(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RATECTRL_H_
+#endif  // AV1_ENCODER_RATECTRL_H_
diff --git a/av1/encoder/rd.c b/av1/encoder/rd.c
index 0d12f2d32d74c676d899890d412dff0831f608d9..8b6d521120a2c810b093bea72127ed87752f03a8 100644
--- a/av1/encoder/rd.c
+++ b/av1/encoder/rd.c
@@ -46,13 +46,13 @@
 // Factor to weigh the rate for switchable interp filters.
 #define SWITCHABLE_INTERP_RATE_FACTOR 1
 
-void vp10_rd_cost_reset(RD_COST *rd_cost) {
+void av1_rd_cost_reset(RD_COST *rd_cost) {
   rd_cost->rate = INT_MAX;
   rd_cost->dist = INT64_MAX;
   rd_cost->rdcost = INT64_MAX;
 }
 
-void vp10_rd_cost_init(RD_COST *rd_cost) {
+void av1_rd_cost_init(RD_COST *rd_cost) {
   rd_cost->rate = 0;
   rd_cost->dist = 0;
   rd_cost->rdcost = 0;
@@ -66,38 +66,38 @@ static const uint8_t rd_thresh_block_size_factor[BLOCK_SIZES] = {
   2, 3, 3, 4, 6, 6, 8, 12, 12, 16, 24, 24, 32
 };
 
-static void fill_mode_costs(VP10_COMP *cpi) {
+static void fill_mode_costs(AV1_COMP *cpi) {
   const FRAME_CONTEXT *const fc = cpi->common.fc;
   int i, j;
 
   for (i = 0; i < INTRA_MODES; ++i)
     for (j = 0; j < INTRA_MODES; ++j)
-      vp10_cost_tokens(cpi->y_mode_costs[i][j], vp10_kf_y_mode_prob[i][j],
-                       vp10_intra_mode_tree);
+      av1_cost_tokens(cpi->y_mode_costs[i][j], av1_kf_y_mode_prob[i][j],
+                       av1_intra_mode_tree);
 
-  vp10_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], vp10_intra_mode_tree);
+  av1_cost_tokens(cpi->mbmode_cost, fc->y_mode_prob[1], av1_intra_mode_tree);
   for (i = 0; i < INTRA_MODES; ++i)
-    vp10_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
-                     vp10_intra_mode_tree);
+    av1_cost_tokens(cpi->intra_uv_mode_cost[i], fc->uv_mode_prob[i],
+                     av1_intra_mode_tree);
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i)
-    vp10_cost_tokens(cpi->switchable_interp_costs[i],
+    av1_cost_tokens(cpi->switchable_interp_costs[i],
                      fc->switchable_interp_prob[i],
-                     vp10_switchable_interp_tree);
+                     av1_switchable_interp_tree);
 
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
     for (j = 0; j < TX_TYPES; ++j)
-      vp10_cost_tokens(cpi->intra_tx_type_costs[i][j],
-                       fc->intra_ext_tx_prob[i][j], vp10_ext_tx_tree);
+      av1_cost_tokens(cpi->intra_tx_type_costs[i][j],
+                       fc->intra_ext_tx_prob[i][j], av1_ext_tx_tree);
   }
   for (i = TX_4X4; i < EXT_TX_SIZES; ++i) {
-    vp10_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
-                     vp10_ext_tx_tree);
+    av1_cost_tokens(cpi->inter_tx_type_costs[i], fc->inter_ext_tx_prob[i],
+                     av1_ext_tx_tree);
   }
 }
 
-static void fill_token_costs(vp10_coeff_cost *c,
-                             vp10_coeff_probs_model (*p)[PLANE_TYPES]) {
+static void fill_token_costs(av1_coeff_cost *c,
+                             av1_coeff_probs_model (*p)[PLANE_TYPES]) {
   int i, j, k, l;
   TX_SIZE t;
   for (t = TX_4X4; t <= TX_32X32; ++t)
@@ -106,10 +106,10 @@ static void fill_token_costs(vp10_coeff_cost *c,
         for (k = 0; k < COEF_BANDS; ++k)
           for (l = 0; l < BAND_COEFF_CONTEXTS(k); ++l) {
             aom_prob probs[ENTROPY_NODES];
-            vp10_model_to_full_probs(p[t][i][j][k][l], probs);
-            vp10_cost_tokens((int *)c[t][i][j][k][0][l], probs, vp10_coef_tree);
-            vp10_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
-                                  vp10_coef_tree);
+            av1_model_to_full_probs(p[t][i][j][k][l], probs);
+            av1_cost_tokens((int *)c[t][i][j][k][0][l], probs, av1_coef_tree);
+            av1_cost_tokens_skip((int *)c[t][i][j][k][1][l], probs,
+                                  av1_coef_tree);
             assert(c[t][i][j][k][0][l][EOB_TOKEN] ==
                    c[t][i][j][k][1][l][EOB_TOKEN]);
           }
@@ -119,7 +119,7 @@ static void fill_token_costs(vp10_coeff_cost *c,
 static int sad_per_bit16lut_8[QINDEX_RANGE];
 static int sad_per_bit4lut_8[QINDEX_RANGE];
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static int sad_per_bit16lut_10[QINDEX_RANGE];
 static int sad_per_bit4lut_10[QINDEX_RANGE];
 static int sad_per_bit16lut_12[QINDEX_RANGE];
@@ -133,16 +133,16 @@ static void init_me_luts_bd(int *bit16lut, int *bit4lut, int range,
   // This is to make it easier to resolve the impact of experimental changes
   // to the quantizer tables.
   for (i = 0; i < range; i++) {
-    const double q = vp10_convert_qindex_to_q(i, bit_depth);
+    const double q = av1_convert_qindex_to_q(i, bit_depth);
     bit16lut[i] = (int)(0.0418 * q + 2.4107);
     bit4lut[i] = (int)(0.063 * q + 2.742);
   }
 }
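For orientation only (illustrative arithmetic, not taken from the source): with a real q of 64, the fits above give sad_per_bit16 = (int)(0.0418 * 64 + 2.4107) = 5 and sad_per_bit4 = (int)(0.063 * 64 + 2.742) = 6, i.e. the motion-estimation SAD-per-bit costs grow roughly linearly with the real quantizer value.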
 
-void vp10_init_me_luts(void) {
+void av1_init_me_luts(void) {
   init_me_luts_bd(sad_per_bit16lut_8, sad_per_bit4lut_8, QINDEX_RANGE,
                   VPX_BITS_8);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   init_me_luts_bd(sad_per_bit16lut_10, sad_per_bit4lut_10, QINDEX_RANGE,
                   VPX_BITS_10);
   init_me_luts_bd(sad_per_bit16lut_12, sad_per_bit4lut_12, QINDEX_RANGE,
@@ -155,9 +155,9 @@ static const int rd_boost_factor[16] = { 64, 32, 32, 32, 24, 16, 12, 12,
 static const int rd_frame_type_factor[FRAME_UPDATE_TYPES] = { 128, 144, 128,
                                                               128, 144 };
 
-int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
-  const int64_t q = vp10_dc_quant(qindex, 0, cpi->common.bit_depth);
-#if CONFIG_VPX_HIGHBITDEPTH
+int av1_compute_rd_mult(const AV1_COMP *cpi, int qindex) {
+  const int64_t q = av1_dc_quant(qindex, 0, cpi->common.bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
   int64_t rdmult = 0;
   switch (cpi->common.bit_depth) {
     case VPX_BITS_8: rdmult = 88 * q * q / 24; break;
@@ -169,7 +169,7 @@ int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
   }
 #else
   int64_t rdmult = 88 * q * q / 24;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   if (cpi->oxcf.pass == 2 && (cpi->common.frame_type != KEY_FRAME)) {
     const GF_GROUP *const gf_group = &cpi->twopass.gf_group;
     const FRAME_UPDATE_TYPE frame_type = gf_group->update_type[gf_group->index];
@@ -184,25 +184,25 @@ int vp10_compute_rd_mult(const VP10_COMP *cpi, int qindex) {
 
 static int compute_rd_thresh_factor(int qindex, aom_bit_depth_t bit_depth) {
   double q;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
-    case VPX_BITS_8: q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
-    case VPX_BITS_10: q = vp10_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
-    case VPX_BITS_12: q = vp10_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
+    case VPX_BITS_8: q = av1_dc_quant(qindex, 0, VPX_BITS_8) / 4.0; break;
+    case VPX_BITS_10: q = av1_dc_quant(qindex, 0, VPX_BITS_10) / 16.0; break;
+    case VPX_BITS_12: q = av1_dc_quant(qindex, 0, VPX_BITS_12) / 64.0; break;
     default:
       assert(0 && "bit_depth should be VPX_BITS_8, VPX_BITS_10 or VPX_BITS_12");
       return -1;
   }
 #else
   (void)bit_depth;
-  q = vp10_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+  q = av1_dc_quant(qindex, 0, VPX_BITS_8) / 4.0;
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   // TODO(debargha): Adjust the function below.
   return VPXMAX((int)(pow(q, RD_THRESH_POW) * 5.12), 8);
 }
 
-void vp10_initialize_me_consts(VP10_COMP *cpi, MACROBLOCK *x, int qindex) {
-#if CONFIG_VPX_HIGHBITDEPTH
+void av1_initialize_me_consts(AV1_COMP *cpi, MACROBLOCK *x, int qindex) {
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (cpi->common.bit_depth) {
     case VPX_BITS_8:
       x->sadperbit16 = sad_per_bit16lut_8[qindex];
@@ -223,15 +223,15 @@ void vp10_initialize_me_consts(VP10_COMP *cpi, MACROBLOCK *x, int qindex) {
   (void)cpi;
   x->sadperbit16 = sad_per_bit16lut_8[qindex];
   x->sadperbit4 = sad_per_bit4lut_8[qindex];
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
 
-static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
+static void set_block_thresholds(const AV1_COMMON *cm, RD_OPT *rd) {
   int i, bsize, segment_id;
 
   for (segment_id = 0; segment_id < MAX_SEGMENTS; ++segment_id) {
     const int qindex =
-        clamp(vp10_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
+        clamp(av1_get_qindex(&cm->seg, segment_id, cm->base_qindex) +
                   cm->y_dc_delta_q,
               0, MAXQ);
     const int q = compute_rd_thresh_factor(qindex, cm->bit_depth);
@@ -258,8 +258,8 @@ static void set_block_thresholds(const VP10_COMMON *cm, RD_OPT *rd) {
   }
 }
 
-void vp10_initialize_rd_consts(VP10_COMP *cpi) {
-  VP10_COMMON *const cm = &cpi->common;
+void av1_initialize_rd_consts(AV1_COMP *cpi) {
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->td.mb;
   RD_OPT *const rd = &cpi->rd;
   int i;
@@ -267,7 +267,7 @@ void vp10_initialize_rd_consts(VP10_COMP *cpi) {
   aom_clear_system_state();
 
   rd->RDDIV = RDDIV_BITS;  // In bits (to multiply D by 128).
-  rd->RDMULT = vp10_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
+  rd->RDMULT = av1_compute_rd_mult(cpi, cm->base_qindex + cm->y_dc_delta_q);
 
   set_error_per_bit(x, rd->RDMULT);
 
@@ -283,21 +283,21 @@ void vp10_initialize_rd_consts(VP10_COMP *cpi) {
   if (cpi->sf.partition_search_type != VAR_BASED_PARTITION ||
       cm->frame_type == KEY_FRAME) {
     for (i = 0; i < PARTITION_CONTEXTS; ++i)
-      vp10_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
-                       vp10_partition_tree);
+      av1_cost_tokens(cpi->partition_cost[i], cm->fc->partition_prob[i],
+                       av1_partition_tree);
   }
 
   fill_mode_costs(cpi);
 
   if (!frame_is_intra_only(cm)) {
-    vp10_build_nmv_cost_table(
+    av1_build_nmv_cost_table(
         x->nmvjointcost,
         cm->allow_high_precision_mv ? x->nmvcost_hp : x->nmvcost, &cm->fc->nmvc,
         cm->allow_high_precision_mv);
 
     for (i = 0; i < INTER_MODE_CONTEXTS; ++i)
-      vp10_cost_tokens((int *)cpi->inter_mode_cost[i],
-                       cm->fc->inter_mode_probs[i], vp10_inter_mode_tree);
+      av1_cost_tokens((int *)cpi->inter_mode_cost[i],
+                       cm->fc->inter_mode_probs[i], av1_inter_mode_tree);
   }
 }
 
@@ -367,7 +367,7 @@ static void model_rd_norm(int xsq_q10, int *r_q10, int *d_q10) {
   *d_q10 = (dist_tab_q10[xq] * b_q10 + dist_tab_q10[xq + 1] * a_q10) >> 10;
 }
 
-void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
+void av1_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
                                    unsigned int qstep, int *rate,
                                    int64_t *dist) {
   // This function models the rate and distortion for a Laplacian
@@ -391,7 +391,7 @@ void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n_log2,
   }
 }
 
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
                                const struct macroblockd_plane *pd,
                                ENTROPY_CONTEXT t_above[16],
                                ENTROPY_CONTEXT t_left[16]) {
@@ -429,7 +429,7 @@ void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
   }
 }
 
-void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+void av1_mv_pred(AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
                   int ref_y_stride, int ref_frame, BLOCK_SIZE block_size) {
   int i;
   int zero_seen = 0;
@@ -482,7 +482,7 @@ void vp10_mv_pred(VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
   x->pred_mv_sad[ref_frame] = best_sad;
 }
 
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
+void av1_setup_pred_block(const MACROBLOCKD *xd,
                            struct buf_2d dst[MAX_MB_PLANE],
                            const YV12_BUFFER_CONFIG *src, int mi_row,
                            int mi_col, const struct scale_factors *scale,
@@ -502,7 +502,7 @@ void vp10_setup_pred_block(const MACROBLOCKD *xd,
   }
 }
 
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
                              int stride) {
   const int bw = b_width_log2_lookup[plane_bsize];
   const int y = 4 * (raster_block >> bw);
@@ -510,15 +510,15 @@ int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
   return y * stride + x;
 }
 
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
                                         int raster_block, int16_t *base) {
   const int stride = 4 * num_4x4_blocks_wide_lookup[plane_bsize];
-  return base + vp10_raster_block_offset(plane_bsize, raster_block, stride);
+  return base + av1_raster_block_offset(plane_bsize, raster_block, stride);
 }
 
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const AV1_COMP *cpi,
                                               int ref_frame) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int scaled_idx = cpi->scaled_ref_idx[ref_frame - 1];
   const int ref_idx = get_ref_frame_buf_idx(cpi, ref_frame);
   return (scaled_idx != ref_idx && scaled_idx != INVALID_IDX)
@@ -526,15 +526,15 @@ YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const VP10_COMP *cpi,
              : NULL;
 }
 
-int vp10_get_switchable_rate(const VP10_COMP *cpi,
+int av1_get_switchable_rate(const AV1_COMP *cpi,
                              const MACROBLOCKD *const xd) {
   const MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-  const int ctx = vp10_get_pred_context_switchable_interp(xd);
+  const int ctx = av1_get_pred_context_switchable_interp(xd);
   return SWITCHABLE_INTERP_RATE_FACTOR *
          cpi->switchable_interp_costs[ctx][mbmi->interp_filter];
 }
 
-void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds(AV1_COMP *cpi) {
   int i;
   RD_OPT *const rd = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
@@ -588,7 +588,7 @@ void vp10_set_rd_speed_thresholds(VP10_COMP *cpi) {
   rd->thresh_mult[THR_D63_PRED] += 2500;
 }
 
-void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
+void av1_set_rd_speed_thresholds_sub8x8(AV1_COMP *cpi) {
   static const int thresh_mult[2][MAX_REFS] = {
     { 2500, 2500, 2500, 4500, 4500, 2500 },
     { 2000, 2000, 2000, 4000, 4000, 2000 }
@@ -598,7 +598,7 @@ void vp10_set_rd_speed_thresholds_sub8x8(VP10_COMP *cpi) {
   memcpy(rd->thresh_mult_sub8x8, thresh_mult[idx], sizeof(thresh_mult[idx]));
 }
 
-void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
+void av1_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
                                 int bsize, int best_mode_index) {
   if (rd_thresh > 0) {
     const int top_mode = bsize < BLOCK_8X8 ? MAX_REFS : MAX_MODES;
@@ -619,10 +619,10 @@ void vp10_update_rd_thresh_fact(int (*factor_buf)[MAX_MODES], int rd_thresh,
   }
 }
 
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
                                 aom_bit_depth_t bit_depth) {
-  const int q = vp10_dc_quant(qindex, qdelta, bit_depth);
-#if CONFIG_VPX_HIGHBITDEPTH
+  const int q = av1_dc_quant(qindex, qdelta, bit_depth);
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (bit_depth) {
     case VPX_BITS_8: return 20 * q;
     case VPX_BITS_10: return 5 * q;
@@ -633,5 +633,5 @@ int vp10_get_intra_cost_penalty(int qindex, int qdelta,
   }
 #else
   return 20 * q;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }
diff --git a/av1/encoder/rd.h b/av1/encoder/rd.h
index 9d253c3d4d1ddf4c0abd2a15176b3608d568a432..3f1cc98b2de0206958de665e62dbedf32365f7c9 100644
--- a/av1/encoder/rd.h
+++ b/av1/encoder/rd.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_RD_H_
-#define VP10_ENCODER_RD_H_
+#ifndef AV1_ENCODER_RD_H_
+#define AV1_ENCODER_RD_H_
 
 #include <limits.h>
 
@@ -43,7 +43,7 @@ extern "C" {
 #define RD_THRESH_INC 1
 
 // This enumerator type needs to be kept aligned with the mode order in
-// const MODE_DEFINITION vp10_mode_order[MAX_MODES] used in the rd code.
+// const MODE_DEFINITION av1_mode_order[MAX_MODES] used in the rd code.
 typedef enum {
   THR_NEARESTMV,
   THR_NEARESTA,
@@ -120,50 +120,50 @@ typedef struct RD_COST {
 } RD_COST;
 
 // Reset the rate distortion cost values to maximum (invalid) value.
-void vp10_rd_cost_reset(RD_COST *rd_cost);
+void av1_rd_cost_reset(RD_COST *rd_cost);
 // Initialize the rate distortion cost values to zero.
-void vp10_rd_cost_init(RD_COST *rd_cost);
+void av1_rd_cost_init(RD_COST *rd_cost);
 
 struct TileInfo;
 struct TileDataEnc;
-struct VP10_COMP;
+struct AV1_COMP;
 struct macroblock;
 
-int vp10_compute_rd_mult(const struct VP10_COMP *cpi, int qindex);
+int av1_compute_rd_mult(const struct AV1_COMP *cpi, int qindex);
 
-void vp10_initialize_rd_consts(struct VP10_COMP *cpi);
+void av1_initialize_rd_consts(struct AV1_COMP *cpi);
 
-void vp10_initialize_me_consts(struct VP10_COMP *cpi, MACROBLOCK *x,
+void av1_initialize_me_consts(struct AV1_COMP *cpi, MACROBLOCK *x,
                                int qindex);
 
-void vp10_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
+void av1_model_rd_from_var_lapndz(unsigned int var, unsigned int n,
                                    unsigned int qstep, int *rate,
                                    int64_t *dist);
 
-int vp10_get_switchable_rate(const struct VP10_COMP *cpi,
+int av1_get_switchable_rate(const struct AV1_COMP *cpi,
                              const MACROBLOCKD *const xd);
 
-int vp10_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
+int av1_raster_block_offset(BLOCK_SIZE plane_bsize, int raster_block,
                              int stride);
 
-int16_t *vp10_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
+int16_t *av1_raster_block_offset_int16(BLOCK_SIZE plane_bsize,
                                         int raster_block, int16_t *base);
 
-YV12_BUFFER_CONFIG *vp10_get_scaled_ref_frame(const struct VP10_COMP *cpi,
+YV12_BUFFER_CONFIG *av1_get_scaled_ref_frame(const struct AV1_COMP *cpi,
                                               int ref_frame);
 
-void vp10_init_me_luts(void);
+void av1_init_me_luts(void);
 
-void vp10_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
+void av1_get_entropy_contexts(BLOCK_SIZE bsize, TX_SIZE tx_size,
                                const struct macroblockd_plane *pd,
                                ENTROPY_CONTEXT t_above[16],
                                ENTROPY_CONTEXT t_left[16]);
 
-void vp10_set_rd_speed_thresholds(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds(struct AV1_COMP *cpi);
 
-void vp10_set_rd_speed_thresholds_sub8x8(struct VP10_COMP *cpi);
+void av1_set_rd_speed_thresholds_sub8x8(struct AV1_COMP *cpi);
 
-void vp10_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
+void av1_update_rd_thresh_fact(int (*fact)[MAX_MODES], int rd_thresh,
                                 int bsize, int best_mode_index);
 
 static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
@@ -171,7 +171,7 @@ static INLINE int rd_less_than_thresh(int64_t best_rd, int thresh,
   return best_rd < ((int64_t)thresh * thresh_fact >> 5) || thresh == INT_MAX;
 }
 
-void vp10_mv_pred(struct VP10_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
+void av1_mv_pred(struct AV1_COMP *cpi, MACROBLOCK *x, uint8_t *ref_y_buffer,
                   int ref_y_stride, int ref_frame, BLOCK_SIZE block_size);
 
 static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
@@ -179,17 +179,17 @@ static INLINE void set_error_per_bit(MACROBLOCK *x, int rdmult) {
   x->errorperbit += (x->errorperbit == 0);
 }
 
-void vp10_setup_pred_block(const MACROBLOCKD *xd,
+void av1_setup_pred_block(const MACROBLOCKD *xd,
                            struct buf_2d dst[MAX_MB_PLANE],
                            const YV12_BUFFER_CONFIG *src, int mi_row,
                            int mi_col, const struct scale_factors *scale,
                            const struct scale_factors *scale_uv);
 
-int vp10_get_intra_cost_penalty(int qindex, int qdelta,
+int av1_get_intra_cost_penalty(int qindex, int qdelta,
                                 aom_bit_depth_t bit_depth);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RD_H_
+#endif  // AV1_ENCODER_RD_H_
diff --git a/av1/encoder/rdopt.c b/av1/encoder/rdopt.c
index e00717a8fb8c48bdfc36080287afce397443db30..170cdf26e81b3f781bfa0f5d823adf7345eb0972 100644
--- a/av1/encoder/rdopt.c
+++ b/av1/encoder/rdopt.c
@@ -80,7 +80,7 @@ struct rdcost_block_args {
 };
 
 #define LAST_NEW_MV_INDEX 6
-static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
+static const MODE_DEFINITION av1_mode_order[MAX_MODES] = {
   { NEARESTMV, { LAST_FRAME, NONE } },
   { NEARESTMV, { ALTREF_FRAME, NONE } },
   { NEARESTMV, { GOLDEN_FRAME, NONE } },
@@ -122,7 +122,7 @@ static const MODE_DEFINITION vp10_mode_order[MAX_MODES] = {
   { D45_PRED, { INTRA_FRAME, NONE } },
 };
 
-static const REF_DEFINITION vp10_ref_order[MAX_REFS] = {
+static const REF_DEFINITION av1_ref_order[MAX_REFS] = {
   { { LAST_FRAME, NONE } },           { { GOLDEN_FRAME, NONE } },
   { { ALTREF_FRAME, NONE } },         { { LAST_FRAME, ALTREF_FRAME } },
   { { GOLDEN_FRAME, ALTREF_FRAME } }, { { INTRA_FRAME, NONE } },
@@ -153,7 +153,7 @@ static void swap_block_ptr(MACROBLOCK *x, PICK_MODE_CONTEXT *ctx, int m, int n,
   }
 }
 
-static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
+static void model_rd_for_sb(AV1_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
                             MACROBLOCKD *xd, int *out_rate_sum,
                             int64_t *out_dist_sum, int *skip_txfm_sb,
                             int64_t *skip_sse_sb) {
@@ -173,9 +173,9 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
   int rate;
   int64_t dist;
   const int dequant_shift =
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd - 5 :
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
                                                     3;
 
   x->pred_sse[ref] = 0;
@@ -250,7 +250,7 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
       rate_sum += rate;
       dist_sum += dist;
     } else {
-      vp10_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
+      av1_model_rd_from_var_lapndz(sum_sse, num_pels_log2_lookup[bs],
                                     pd->dequant[1] >> dequant_shift, &rate,
                                     &dist);
       rate_sum += rate;
@@ -264,7 +264,7 @@ static void model_rd_for_sb(VP10_COMP *cpi, BLOCK_SIZE bsize, MACROBLOCK *x,
   *out_dist_sum = dist_sum << 4;
 }
 
-int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
+int64_t av1_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                            intptr_t block_size, int64_t *ssz) {
   int i;
   int64_t error = 0, sqcoeff = 0;
@@ -279,7 +279,7 @@ int64_t vp10_block_error_c(const tran_low_t *coeff, const tran_low_t *dqcoeff,
   return error;
 }
 
-int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
+int64_t av1_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
                               int block_size) {
   int i;
   int64_t error = 0;
@@ -292,8 +292,8 @@ int64_t vp10_block_error_fp_c(const int16_t *coeff, const int16_t *dqcoeff,
   return error;
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
+#if CONFIG_AOM_HIGHBITDEPTH
+int64_t av1_highbd_block_error_c(const tran_low_t *coeff,
                                   const tran_low_t *dqcoeff,
                                   intptr_t block_size, int64_t *ssz, int bd) {
   int i;
@@ -313,7 +313,7 @@ int64_t vp10_highbd_block_error_c(const tran_low_t *coeff,
   *ssz = sqcoeff;
   return error;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 /* The trailing '0' is a terminator which is used inside cost_coeffs() to
  * decide whether to include cost of a trailing EOB node or not (i.e. we
@@ -342,10 +342,10 @@ static int cost_coeffs(MACROBLOCK *x, int plane, int block, ENTROPY_CONTEXT *A,
   uint8_t token_cache[32 * 32];
   int pt = combine_entropy_contexts(*A, *L);
   int c, cost;
-#if CONFIG_VPX_HIGHBITDEPTH
-  const int *cat6_high_cost = vp10_get_high_cost_table(xd->bd);
+#if CONFIG_AOM_HIGHBITDEPTH
+  const int *cat6_high_cost = av1_get_high_cost_table(xd->bd);
 #else
-  const int *cat6_high_cost = vp10_get_high_cost_table(8);
+  const int *cat6_high_cost = av1_get_high_cost_table(8);
 #endif
 
   // Check for consistency of tx_size with mode info
@@ -363,11 +363,11 @@ static int cost_coeffs(MACROBLOCK *x, int plane, int block, ENTROPY_CONTEXT *A,
     int v = qcoeff[0];
     int16_t prev_t;
     EXTRABIT e;
-    vp10_get_token_extra(v, &prev_t, &e);
+    av1_get_token_extra(v, &prev_t, &e);
     cost = (*token_costs)[0][pt][prev_t] +
-           vp10_get_cost(prev_t, e, cat6_high_cost);
+           av1_get_cost(prev_t, e, cat6_high_cost);
 
-    token_cache[0] = vp10_pt_energy_class[prev_t];
+    token_cache[0] = av1_pt_energy_class[prev_t];
     ++token_costs;
 
     // ac tokens
@@ -376,15 +376,15 @@ static int cost_coeffs(MACROBLOCK *x, int plane, int block, ENTROPY_CONTEXT *A,
       int16_t t;
 
       v = qcoeff[rc];
-      vp10_get_token_extra(v, &t, &e);
+      av1_get_token_extra(v, &t, &e);
       if (use_fast_coef_costing) {
         cost += (*token_costs)[!prev_t][!prev_t][t] +
-                vp10_get_cost(t, e, cat6_high_cost);
+                av1_get_cost(t, e, cat6_high_cost);
       } else {
         pt = get_coef_context(nb, token_cache, c);
         cost += (*token_costs)[!prev_t][pt][t] +
-                vp10_get_cost(t, e, cat6_high_cost);
-        token_cache[rc] = vp10_pt_energy_class[t];
+                av1_get_cost(t, e, cat6_high_cost);
+        token_cache[rc] = av1_pt_energy_class[t];
       }
       prev_t = t;
       if (!--band_left) {
@@ -420,15 +420,15 @@ static void dist_block(MACROBLOCK *x, int plane, int block, TX_SIZE tx_size,
   int shift = tx_size == TX_32X32 ? 0 : 2;
   tran_low_t *const coeff = BLOCK_OFFSET(p->coeff, block);
   tran_low_t *const dqcoeff = BLOCK_OFFSET(pd->dqcoeff, block);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int bd = (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) ? xd->bd : 8;
-  *out_dist = vp10_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
+  *out_dist = av1_highbd_block_error(coeff, dqcoeff, 16 << ss_txfrm_size,
                                       &this_sse, bd) >>
               shift;
 #else
   *out_dist =
-      vp10_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+      av1_block_error(coeff, dqcoeff, 16 << ss_txfrm_size, &this_sse) >> shift;
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   *out_sse = this_sse >> shift;
 }
 
@@ -454,21 +454,21 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
 
   if (!is_inter_block(mbmi)) {
     struct encode_b_args arg = { x, NULL, &mbmi->skip };
-    vp10_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
+    av1_encode_block_intra(plane, block, blk_row, blk_col, plane_bsize,
                             tx_size, &arg);
     dist_block(x, plane, block, tx_size, &dist, &sse);
   } else if (max_txsize_lookup[plane_bsize] == tx_size) {
     if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
         SKIP_TXFM_NONE) {
       // full forward transform and quantization
-      vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
+      av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
       dist_block(x, plane, block, tx_size, &dist, &sse);
     } else if (x->skip_txfm[(plane << 2) + (block >> (tx_size << 1))] ==
                SKIP_TXFM_AC_ONLY) {
       // compute DC coefficient
       tran_low_t *const coeff = BLOCK_OFFSET(x->plane[plane].coeff, block);
       tran_low_t *const dqcoeff = BLOCK_OFFSET(xd->plane[plane].dqcoeff, block);
-      vp10_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
+      av1_xform_quant_dc(x, plane, block, blk_row, blk_col, plane_bsize,
                           tx_size);
       sse = x->bsse[(plane << 2) + (block >> (tx_size << 1))] << 4;
       dist = sse;
@@ -476,7 +476,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
         const int64_t orig_sse = (int64_t)coeff[0] * coeff[0];
         const int64_t resd_sse = coeff[0] - dqcoeff[0];
         int64_t dc_correct = orig_sse - resd_sse * resd_sse;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         dc_correct >>= ((xd->bd - 8) * 2);
 #endif
         if (tx_size != TX_32X32) dc_correct >>= 2;
@@ -492,7 +492,7 @@ static void block_rd_txfm(int plane, int block, int blk_row, int blk_col,
     }
   } else {
     // full forward transform and quantization
-    vp10_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
+    av1_xform_quant(x, plane, block, blk_row, blk_col, plane_bsize, tx_size);
     dist_block(x, plane, block, tx_size, &dist, &sse);
   }
 
@@ -534,7 +534,7 @@ static void txfm_rd_in_plane(MACROBLOCK *x, int *rate, int64_t *distortion,
   const struct macroblockd_plane *const pd = &xd->plane[plane];
   TX_TYPE tx_type;
   struct rdcost_block_args args;
-  vp10_zero(args);
+  av1_zero(args);
   args.x = x;
   args.best_rd = ref_best_rd;
   args.use_fast_coef_costing = use_fast_coef_casting;
@@ -542,12 +542,12 @@ static void txfm_rd_in_plane(MACROBLOCK *x, int *rate, int64_t *distortion,
 
   if (plane == 0) xd->mi[0]->mbmi.tx_size = tx_size;
 
-  vp10_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
+  av1_get_entropy_contexts(bsize, tx_size, pd, args.t_above, args.t_left);
 
   tx_type = get_tx_type(pd->plane_type, xd, 0);
   args.so = get_scan(tx_size, tx_type);
 
-  vp10_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
+  av1_foreach_transformed_block_in_plane(xd, bsize, plane, block_rd_txfm,
                                           &args);
   if (args.exit_early) {
     *rate = INT_MAX;
@@ -562,11 +562,11 @@ static void txfm_rd_in_plane(MACROBLOCK *x, int *rate, int64_t *distortion,
   }
 }
 
-static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_largest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                    int64_t *distortion, int *skip, int64_t *sse,
                                    int64_t ref_best_rd, BLOCK_SIZE bs) {
   const TX_SIZE max_tx_size = max_txsize_lookup[bs];
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const TX_SIZE largest_tx_size = tx_mode_to_biggest_tx_size[cm->tx_mode];
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
@@ -574,9 +574,9 @@ static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
   TX_TYPE tx_type, best_tx_type = DCT_DCT;
   int r, s;
   int64_t d, psse, this_rd, best_rd = INT64_MAX;
-  aom_prob skip_prob = vp10_get_skip_prob(cm, xd);
-  int s0 = vp10_cost_bit(skip_prob, 0);
-  int s1 = vp10_cost_bit(skip_prob, 1);
+  aom_prob skip_prob = av1_get_skip_prob(cm, xd);
+  int s0 = av1_cost_bit(skip_prob, 0);
+  int s1 = av1_cost_bit(skip_prob, 1);
   const int is_inter = is_inter_block(mbmi);
 
   mbmi->tx_size = VPXMIN(max_tx_size, largest_tx_size);
@@ -619,7 +619,7 @@ static void choose_largest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
   }
 }
 
-static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_smallest_tx_size(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                     int64_t *distortion, int *skip,
                                     int64_t *sse, int64_t ref_best_rd,
                                     BLOCK_SIZE bs) {
@@ -632,15 +632,15 @@ static void choose_smallest_tx_size(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
                    mbmi->tx_size, cpi->sf.use_fast_coef_costing);
 }
 
-static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void choose_tx_size_from_rd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                    int64_t *distortion, int *skip,
                                    int64_t *psse, int64_t ref_best_rd,
                                    BLOCK_SIZE bs) {
   const TX_SIZE max_tx_size = max_txsize_lookup[bs];
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-  aom_prob skip_prob = vp10_get_skip_prob(cm, xd);
+  aom_prob skip_prob = av1_get_skip_prob(cm, xd);
   int r, s;
   int64_t d, sse;
   int64_t rd = INT64_MAX;
@@ -655,8 +655,8 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
 
   const aom_prob *tx_probs = get_tx_probs2(max_tx_size, xd, &cm->fc->tx_probs);
   assert(skip_prob > 0);
-  s0 = vp10_cost_bit(skip_prob, 0);
-  s1 = vp10_cost_bit(skip_prob, 1);
+  s0 = av1_cost_bit(skip_prob, 0);
+  s1 = av1_cost_bit(skip_prob, 1);
 
   if (tx_select) {
     start_tx = max_tx_size;
@@ -679,9 +679,9 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
       int r_tx_size = 0;
       for (m = 0; m <= n - (n == (int)max_tx_size); ++m) {
         if (m == n)
-          r_tx_size += vp10_cost_zero(tx_probs[m]);
+          r_tx_size += av1_cost_zero(tx_probs[m]);
         else
-          r_tx_size += vp10_cost_one(tx_probs[m]);
+          r_tx_size += av1_cost_one(tx_probs[m]);
       }
 
       if (n >= TX_32X32 && tx_type != DCT_DCT) {
@@ -744,7 +744,7 @@ static void choose_tx_size_from_rd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
                    cpi->sf.use_fast_coef_costing);
 }
 
-static void super_block_yrd(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static void super_block_yrd(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skip, int64_t *psse,
                             BLOCK_SIZE bs, int64_t ref_best_rd) {
   MACROBLOCKD *xd = &x->e_mbd;
@@ -783,7 +783,7 @@ static int conditional_skipintra(PREDICTION_MODE mode,
   return 0;
 }
 
-static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
+static int64_t rd_pick_intra4x4block(AV1_COMP *cpi, MACROBLOCK *x, int row,
                                      int col, PREDICTION_MODE *best_mode,
                                      const int *bmode_costs, ENTROPY_CONTEXT *a,
                                      ENTROPY_CONTEXT *l, int *bestrate,
@@ -804,7 +804,7 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
   const int num_4x4_blocks_high = num_4x4_blocks_high_lookup[bsize];
   int idx, idy;
   uint8_t best_dst[8 * 8];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint16_t best_dst16[8 * 8];
 #endif
 
@@ -812,7 +812,7 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
   memcpy(tl, l, sizeof(tl));
   xd->mi[0]->mbmi.tx_size = TX_4X4;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
       int64_t this_rd;
@@ -837,42 +837,42 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
           const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
           uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
           int16_t *const src_diff =
-              vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+              av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
           tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
           xd->mi[0]->bmi[block].as_mode = mode;
-          vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+          av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
                                    dst_stride, col + idx, row + idy, 0);
           aom_highbd_subtract_block(4, 4, src_diff, 8, src, src_stride, dst,
                                     dst_stride, xd->bd);
           if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
             TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
             const scan_order *so = get_scan(TX_4X4, tx_type);
-            vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
-            vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+            av1_highbd_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
+            av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
             ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                  so->scan, so->neighbors,
                                  cpi->sf.use_fast_coef_costing);
             if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
               goto next_highbd;
-            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+            av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
                                          dst_stride, p->eobs[block], xd->bd,
                                          DCT_DCT, 1);
           } else {
             int64_t unused;
             TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
             const scan_order *so = get_scan(TX_4X4, tx_type);
-            vp10_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
-            vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+            av1_highbd_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
+            av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
             ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                  so->scan, so->neighbors,
                                  cpi->sf.use_fast_coef_costing);
             distortion +=
-                vp10_highbd_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
+                av1_highbd_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block),
                                         16, &unused, xd->bd) >>
                 2;
             if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
               goto next_highbd;
-            vp10_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+            av1_highbd_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
                                          dst_stride, p->eobs[block], xd->bd,
                                          tx_type, 0);
           }
@@ -907,7 +907,7 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
 
     return best_rd;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   for (mode = DC_PRED; mode <= TM_PRED; ++mode) {
     int64_t this_rd;
@@ -932,41 +932,41 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
         const uint8_t *const src = &src_init[idx * 4 + idy * 4 * src_stride];
         uint8_t *const dst = &dst_init[idx * 4 + idy * 4 * dst_stride];
         int16_t *const src_diff =
-            vp10_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
+            av1_raster_block_offset_int16(BLOCK_8X8, block, p->src_diff);
         tran_low_t *const coeff = BLOCK_OFFSET(x->plane[0].coeff, block);
         xd->mi[0]->bmi[block].as_mode = mode;
-        vp10_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
+        av1_predict_intra_block(xd, 1, 1, TX_4X4, mode, dst, dst_stride, dst,
                                  dst_stride, col + idx, row + idy, 0);
         aom_subtract_block(4, 4, src_diff, 8, src, src_stride, dst, dst_stride);
 
         if (xd->lossless[xd->mi[0]->mbmi.segment_id]) {
           TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
           const scan_order *so = get_scan(TX_4X4, tx_type);
-          vp10_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
-          vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+          av1_fwd_txfm_4x4(src_diff, coeff, 8, DCT_DCT, 1);
+          av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
           ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                so->scan, so->neighbors,
                                cpi->sf.use_fast_coef_costing);
           if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
             goto next;
-          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+          av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
                                 dst_stride, p->eobs[block], DCT_DCT, 1);
         } else {
           int64_t unused;
           TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, block);
           const scan_order *so = get_scan(TX_4X4, tx_type);
-          vp10_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
-          vp10_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
+          av1_fwd_txfm_4x4(src_diff, coeff, 8, tx_type, 0);
+          av1_regular_quantize_b_4x4(x, 0, block, so->scan, so->iscan);
           ratey += cost_coeffs(x, 0, block, tempa + idx, templ + idy, TX_4X4,
                                so->scan, so->neighbors,
                                cpi->sf.use_fast_coef_costing);
           distortion +=
-              vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
+              av1_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, block), 16,
                                &unused) >>
               2;
           if (RDCOST(x->rdmult, x->rddiv, ratey, distortion) >= best_rd)
             goto next;
-          vp10_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
+          av1_inv_txfm_add_4x4(BLOCK_OFFSET(pd->dqcoeff, block), dst,
                                 dst_stride, p->eobs[block], tx_type, 0);
         }
       }
@@ -999,7 +999,7 @@ static int64_t rd_pick_intra4x4block(VP10_COMP *cpi, MACROBLOCK *x, int row,
   return best_rd;
 }
 
-static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
+static int64_t rd_pick_intra_sub_8x8_y_mode(AV1_COMP *cpi, MACROBLOCK *mb,
                                             int *rate, int *rate_y,
                                             int64_t *distortion,
                                             int64_t best_rd) {
@@ -1030,8 +1030,8 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
       int64_t d = INT64_MAX, this_rd = INT64_MAX;
       i = idy * 2 + idx;
       if (cpi->common.frame_type == KEY_FRAME) {
-        const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, i);
-        const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, i);
+        const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, i);
+        const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, i);
 
         bmode_costs = cpi->y_mode_costs[A][L];
       }
@@ -1065,7 +1065,7 @@ static int64_t rd_pick_intra_sub_8x8_y_mode(VP10_COMP *cpi, MACROBLOCK *mb,
 }
 
 // This function is used only for intra_only frames
-static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_pick_intra_sby_mode(AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                                       int *rate_tokenonly, int64_t *distortion,
                                       int *skippable, BLOCK_SIZE bsize,
                                       int64_t best_rd) {
@@ -1080,8 +1080,8 @@ static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
   int *bmode_costs;
   const MODE_INFO *above_mi = xd->above_mi;
   const MODE_INFO *left_mi = xd->left_mi;
-  const PREDICTION_MODE A = vp10_above_block_mode(mic, above_mi, 0);
-  const PREDICTION_MODE L = vp10_left_block_mode(mic, left_mi, 0);
+  const PREDICTION_MODE A = av1_above_block_mode(mic, above_mi, 0);
+  const PREDICTION_MODE L = av1_left_block_mode(mic, left_mi, 0);
   bmode_costs = cpi->y_mode_costs[A][L];
 
   memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
@@ -1119,7 +1119,7 @@ static int64_t rd_pick_intra_sby_mode(VP10_COMP *cpi, MACROBLOCK *x, int *rate,
 
 // Return value 0: early termination triggered, no valid rd cost available;
 //              1: rd cost values are valid.
-static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int super_block_uvrd(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                             int64_t *distortion, int *skippable, int64_t *sse,
                             BLOCK_SIZE bsize, int64_t ref_best_rd) {
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -1135,7 +1135,7 @@ static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
   if (is_inter_block(mbmi) && is_cost_valid) {
     int plane;
     for (plane = 1; plane < MAX_MB_PLANE; ++plane)
-      vp10_subtract_plane(x, bsize, plane);
+      av1_subtract_plane(x, bsize, plane);
   }
 
   *rate = 0;
@@ -1167,7 +1167,7 @@ static int super_block_uvrd(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
   return is_cost_valid;
 }
 
-static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t rd_pick_intra_sbuv_mode(AV1_COMP *cpi, MACROBLOCK *x,
                                        PICK_MODE_CONTEXT *ctx, int *rate,
                                        int *rate_tokenonly, int64_t *distortion,
                                        int *skippable, BLOCK_SIZE bsize,
@@ -1207,7 +1207,7 @@ static int64_t rd_pick_intra_sbuv_mode(VP10_COMP *cpi, MACROBLOCK *x,
   return best_rd;
 }
 
-static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
+static int64_t rd_sbuv_dcpred(const AV1_COMP *cpi, MACROBLOCK *x, int *rate,
                               int *rate_tokenonly, int64_t *distortion,
                               int *skippable, BLOCK_SIZE bsize) {
   int64_t unused;
@@ -1221,7 +1221,7 @@ static int64_t rd_sbuv_dcpred(const VP10_COMP *cpi, MACROBLOCK *x, int *rate,
   return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
 }
 
-static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
+static void choose_intra_uv_mode(AV1_COMP *cpi, MACROBLOCK *const x,
                                  PICK_MODE_CONTEXT *ctx, BLOCK_SIZE bsize,
                                  TX_SIZE max_tx_size, int *rate_uv,
                                  int *rate_uv_tokenonly, int64_t *dist_uv,
@@ -1241,13 +1241,13 @@ static void choose_intra_uv_mode(VP10_COMP *cpi, MACROBLOCK *const x,
   *mode_uv = x->e_mbd.mi[0]->mbmi.uv_mode;
 }
 
-static int cost_mv_ref(const VP10_COMP *cpi, PREDICTION_MODE mode,
+static int cost_mv_ref(const AV1_COMP *cpi, PREDICTION_MODE mode,
                        int mode_context) {
   assert(is_inter_mode(mode));
   return cpi->inter_mode_cost[mode_context][INTER_OFFSET(mode)];
 }
 
-static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
+static int set_and_cost_bmi_mvs(AV1_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
                                 int i, PREDICTION_MODE mode, int_mv this_mv[2],
                                 int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                                 int_mv seg_mvs[MAX_REF_FRAMES],
@@ -1265,12 +1265,12 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
   switch (mode) {
     case NEWMV:
       this_mv[0].as_int = seg_mvs[mbmi->ref_frame[0]].as_int;
-      thismvcost += vp10_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
+      thismvcost += av1_mv_bit_cost(&this_mv[0].as_mv, &best_ref_mv[0]->as_mv,
                                      mvjcost, mvcost, MV_COST_WEIGHT_SUB);
       if (is_compound) {
         this_mv[1].as_int = seg_mvs[mbmi->ref_frame[1]].as_int;
         thismvcost +=
-            vp10_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
+            av1_mv_bit_cost(&this_mv[1].as_mv, &best_ref_mv[1]->as_mv, mvjcost,
                              mvcost, MV_COST_WEIGHT_SUB);
       }
       break;
@@ -1300,7 +1300,7 @@ static int set_and_cost_bmi_mvs(VP10_COMP *cpi, MACROBLOCK *x, MACROBLOCKD *xd,
          thismvcost;
 }
 
-static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
+static int64_t encode_inter_mb_segment(AV1_COMP *cpi, MACROBLOCK *x,
                                        int64_t best_yrd, int i, int *labelyrate,
                                        int64_t *distortion, int64_t *sse,
                                        ENTROPY_CONTEXT *ta, ENTROPY_CONTEXT *tl,
@@ -1317,43 +1317,43 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
   void (*fwd_txm4x4)(const int16_t *input, tran_low_t *output, int stride);
 
   const uint8_t *const src =
-      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+      &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
   uint8_t *const dst =
-      &pd->dst.buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
+      &pd->dst.buf[av1_raster_block_offset(BLOCK_8X8, i, pd->dst.stride)];
   int64_t thisdistortion = 0, thissse = 0;
   int thisrate = 0;
   TX_TYPE tx_type = get_tx_type(PLANE_TYPE_Y, xd, i);
   const scan_order *so = get_scan(TX_4X4, tx_type);
 
-  vp10_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
+  av1_build_inter_predictor_sub8x8(xd, 0, i, ir, ic, mi_row, mi_col);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_highbd_fwht4x4
+    fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? av1_highbd_fwht4x4
                                                    : aom_highbd_fdct4x4;
   } else {
-    fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_fwht4x4 : aom_fdct4x4;
+    fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? av1_fwht4x4 : aom_fdct4x4;
   }
 #else
-  fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? vp10_fwht4x4 : aom_fdct4x4;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+  fwd_txm4x4 = xd->lossless[mi->mbmi.segment_id] ? av1_fwht4x4 : aom_fdct4x4;
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     aom_highbd_subtract_block(
         height, width,
-        vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
+        av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff), 8, src,
         p->src.stride, dst, pd->dst.stride, xd->bd);
   } else {
-    aom_subtract_block(height, width, vp10_raster_block_offset_int16(
+    aom_subtract_block(height, width, av1_raster_block_offset_int16(
                                           BLOCK_8X8, i, p->src_diff),
                        8, src, p->src.stride, dst, pd->dst.stride);
   }
 #else
   aom_subtract_block(height, width,
-                     vp10_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
+                     av1_raster_block_offset_int16(BLOCK_8X8, i, p->src_diff),
                      8, src, p->src.stride, dst, pd->dst.stride);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   k = i;
   for (idy = 0; idy < height / 4; ++idy) {
@@ -1363,21 +1363,21 @@ static int64_t encode_inter_mb_segment(VP10_COMP *cpi, MACROBLOCK *x,
 
       k += (idy * 2 + idx);
       coeff = BLOCK_OFFSET(p->coeff, k);
-      fwd_txm4x4(vp10_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
+      fwd_txm4x4(av1_raster_block_offset_int16(BLOCK_8X8, k, p->src_diff),
                  coeff, 8);
-      vp10_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
-#if CONFIG_VPX_HIGHBITDEPTH
+      av1_regular_quantize_b_4x4(x, 0, k, so->scan, so->iscan);
+#if CONFIG_AOM_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        thisdistortion += vp10_highbd_block_error(
+        thisdistortion += av1_highbd_block_error(
             coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz, xd->bd);
       } else {
         thisdistortion +=
-            vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
+            av1_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
       }
 #else
       thisdistortion +=
-          vp10_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+          av1_block_error(coeff, BLOCK_OFFSET(pd->dqcoeff, k), 16, &ssz);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       thissse += ssz;
       thisrate +=
           cost_coeffs(x, 0, k, ta + (k & 1), tl + (k >> 1), TX_4X4, so->scan,
@@ -1433,15 +1433,15 @@ static INLINE void mi_buf_shift(MACROBLOCK *x, int i) {
   struct macroblockd_plane *const pd = &x->e_mbd.plane[0];
 
   p->src.buf =
-      &p->src.buf[vp10_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
+      &p->src.buf[av1_raster_block_offset(BLOCK_8X8, i, p->src.stride)];
   assert(((intptr_t)pd->pre[0].buf & 0x7) == 0);
   pd->pre[0].buf =
       &pd->pre[0]
-           .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
+           .buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[0].stride)];
   if (has_second_ref(mbmi))
     pd->pre[1].buf =
         &pd->pre[1]
-             .buf[vp10_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
+             .buf[av1_raster_block_offset(BLOCK_8X8, i, pd->pre[1].stride)];
 }
 
 static INLINE void mi_buf_restore(MACROBLOCK *x, struct buf_2d orig_src,
@@ -1458,7 +1458,7 @@ static INLINE int mv_has_subpel(const MV *mv) {
 
 // Check if NEARESTMV/NEARMV/ZEROMV is the cheapest way to encode zero motion.
 // TODO(aconverse): Find out if this is still productive then clean up or remove
-static int check_best_zero_mv(const VP10_COMP *cpi,
+static int check_best_zero_mv(const AV1_COMP *cpi,
                               const uint8_t mode_context[MAX_REF_FRAMES],
                               int_mv frame_mv[MB_MODE_COUNT][MAX_REF_FRAMES],
                               int this_mode,
@@ -1494,11 +1494,11 @@ static int check_best_zero_mv(const VP10_COMP *cpi,
   return 1;
 }
 
-static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
+static void joint_motion_search(AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                                 int_mv *frame_mv, int mi_row, int mi_col,
                                 int_mv single_newmv[MAX_REF_FRAMES],
                                 int *rate_mv) {
-  const VP10_COMMON *const cm = &cpi->common;
+  const AV1_COMMON *const cm = &cpi->common;
   const int pw = 4 * num_4x4_blocks_wide_lookup[bsize];
   const int ph = 4 * num_4x4_blocks_high_lookup[bsize];
   MACROBLOCKD *xd = &x->e_mbd;
@@ -1507,24 +1507,24 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
                         mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1] };
   int_mv ref_mv[2];
   int ite, ref;
-  const InterpKernel *kernel = vp10_filter_kernels[mbmi->interp_filter];
+  const InterpKernel *kernel = av1_filter_kernels[mbmi->interp_filter];
   struct scale_factors sf;
 
   // Do joint motion search in compound mode to get more accurate mv.
   struct buf_2d backup_yv12[2][MAX_MB_PLANE];
   int last_besterr[2] = { INT_MAX, INT_MAX };
   const YV12_BUFFER_CONFIG *const scaled_ref_frame[2] = {
-    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
-    vp10_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
+    av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[0]),
+    av1_get_scaled_ref_frame(cpi, mbmi->ref_frame[1])
   };
 
 // Prediction buffer from second frame.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, second_pred_alloc_16[64 * 64]);
   uint8_t *second_pred;
 #else
   DECLARE_ALIGNED(16, uint8_t, second_pred[64 * 64]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   for (ref = 0; ref < 2; ++ref) {
     ref_mv[ref] = x->mbmi_ext->ref_mvs[refs[ref]][0];
@@ -1536,7 +1536,7 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
       // motion search code to be used without additional modifications.
       for (i = 0; i < MAX_MB_PLANE; i++)
         backup_yv12[ref][i] = xd->plane[i].pre[ref];
-      vp10_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
+      av1_setup_pre_planes(xd, ref, scaled_ref_frame[ref], mi_row, mi_col,
                             NULL);
     }
 
@@ -1545,13 +1545,13 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
 
 // Since we have scaled the reference frames to match the size of the current
 // frame we must use a unit scaling factor during mode selection.
-#if CONFIG_VPX_HIGHBITDEPTH
-  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+#if CONFIG_AOM_HIGHBITDEPTH
+  av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
                                      cm->height, cm->use_highbitdepth);
 #else
-  vp10_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
+  av1_setup_scale_factors_for_frame(&sf, cm->width, cm->height, cm->width,
                                      cm->height);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Allow joint search multiple times iteratively for each reference frame
   // and break out of the search loop if it couldn't find a better mv.
@@ -1575,30 +1575,30 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
     ref_yv12[1] = xd->plane[0].pre[1];
 
 // Get the prediction block from the 'other' reference frame.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
       second_pred = CONVERT_TO_BYTEPTR(second_pred_alloc_16);
-      vp10_highbd_build_inter_predictor(
+      av1_highbd_build_inter_predictor(
           ref_yv12[!id].buf, ref_yv12[!id].stride, second_pred, pw,
           &frame_mv[refs[!id]].as_mv, &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
           mi_col * MI_SIZE, mi_row * MI_SIZE, xd->bd);
     } else {
       second_pred = (uint8_t *)second_pred_alloc_16;
-      vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+      av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
                                  second_pred, pw, &frame_mv[refs[!id]].as_mv,
                                  &sf, pw, ph, 0, kernel, MV_PRECISION_Q3,
                                  mi_col * MI_SIZE, mi_row * MI_SIZE);
     }
 #else
-    vp10_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
+    av1_build_inter_predictor(ref_yv12[!id].buf, ref_yv12[!id].stride,
                                second_pred, pw, &frame_mv[refs[!id]].as_mv, &sf,
                                pw, ph, 0, kernel, MV_PRECISION_Q3,
                                mi_col * MI_SIZE, mi_row * MI_SIZE);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     // Do compound motion search on the current reference frame.
     if (id) xd->plane[0].pre[0] = ref_yv12[id];
-    vp10_set_mv_search_range(x, &ref_mv[id].as_mv);
+    av1_set_mv_search_range(x, &ref_mv[id].as_mv);
 
     // Use the mv result from the single mode as mv predictor.
     tmp_mv = frame_mv[refs[id]].as_mv;
@@ -1607,11 +1607,11 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
     tmp_mv.row >>= 3;
 
     // Small-range full-pixel motion search.
-    bestsme = vp10_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
+    bestsme = av1_refining_search_8p_c(x, &tmp_mv, sadpb, search_range,
                                         &cpi->fn_ptr[bsize], &ref_mv[id].as_mv,
                                         second_pred);
     if (bestsme < INT_MAX)
-      bestsme = vp10_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
+      bestsme = av1_get_mvpred_av_var(x, &tmp_mv, &ref_mv[id].as_mv,
                                        second_pred, &cpi->fn_ptr[bsize], 1);
 
     x->mv_col_min = tmp_col_min;
@@ -1650,14 +1650,14 @@ static void joint_motion_search(VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize,
         xd->plane[i].pre[ref] = backup_yv12[ref][i];
     }
 
-    *rate_mv += vp10_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
+    *rate_mv += av1_mv_bit_cost(&frame_mv[refs[ref]].as_mv,
                                  &x->mbmi_ext->ref_mvs[refs[ref]][0].as_mv,
                                  x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
   }
 }
 
 static int64_t rd_pick_best_sub8x8_mode(
-    VP10_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
+    AV1_COMP *cpi, MACROBLOCK *x, int_mv *best_ref_mv,
     int_mv *second_best_ref_mv, int64_t best_rd, int *returntotrate,
     int *returnyrate, int64_t *returndistortion, int *skippable, int64_t *psse,
     int mvthresh, int_mv seg_mvs[4][MAX_REF_FRAMES], BEST_SEG_INFO *bsi_buf,
@@ -1671,7 +1671,7 @@ static int64_t rd_pick_best_sub8x8_mode(
   int k, br = 0, idx, idy;
   int64_t bd = 0, block_sse = 0;
   PREDICTION_MODE this_mode;
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   struct macroblock_plane *const p = &x->plane[0];
   struct macroblockd_plane *const pd = &xd->plane[0];
   const int label_count = 4;
@@ -1687,7 +1687,7 @@ static int64_t rd_pick_best_sub8x8_mode(
   const int inter_mode_mask = cpi->sf.inter_mode_mask[bsize];
   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
 
-  vp10_zero(*bsi);
+  av1_zero(*bsi);
 
   bsi->segment_rd = best_rd;
   bsi->ref_mv[0] = best_ref_mv;
@@ -1721,7 +1721,7 @@ static int64_t rd_pick_best_sub8x8_mode(
       for (ref = 0; ref < 1 + has_second_rf; ++ref) {
         const MV_REFERENCE_FRAME frame = mbmi->ref_frame[ref];
         frame_mv[ZEROMV][frame].as_int = 0;
-        vp10_append_sub8x8_mvs_for_idx(
+        av1_append_sub8x8_mvs_for_idx(
             cm, xd, i, ref, mi_row, mi_col, &frame_mv[NEARESTMV][frame],
             &frame_mv[NEARMV][frame], mbmi_ext->mode_context);
       }
@@ -1778,7 +1778,7 @@ static int64_t rd_pick_best_sub8x8_mode(
             // max mv magnitude and the best ref mvs of the current block for
             // the given reference.
             step_param =
-                (vp10_init_search_range(max_mv) + cpi->mv_step_param) / 2;
+                (av1_init_search_range(max_mv) + cpi->mv_step_param) / 2;
           } else {
             step_param = cpi->mv_step_param;
           }
@@ -1795,9 +1795,9 @@ static int64_t rd_pick_best_sub8x8_mode(
           // adjust src pointer for this block
           mi_buf_shift(x, i);
 
-          vp10_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
+          av1_set_mv_search_range(x, &bsi->ref_mv[0]->as_mv);
 
-          bestsme = vp10_full_pixel_search(
+          bestsme = av1_full_pixel_search(
               cpi, x, bsize, &mvp_full, step_param, sadpb,
               cpi->sf.mv.subpel_search_method != SUBPEL_TREE ? cost_list : NULL,
               &bsi->ref_mv[0]->as_mv, new_mv, INT_MAX, 1);
@@ -1985,14 +1985,14 @@ static int64_t rd_pick_best_sub8x8_mode(
   *returntotrate = bsi->r;
   *returndistortion = bsi->d;
   *returnyrate = bsi->segment_yrate;
-  *skippable = vp10_is_skippable_in_plane(x, BLOCK_8X8, 0);
+  *skippable = av1_is_skippable_in_plane(x, BLOCK_8X8, 0);
   *psse = bsi->sse;
   mbmi->mode = bsi->modes[3];
 
   return bsi->segment_rd;
 }
 
-static void estimate_ref_frame_costs(const VP10_COMMON *cm,
+static void estimate_ref_frame_costs(const AV1_COMMON *cm,
                                      const MACROBLOCKD *xd, int segment_id,
                                      unsigned int *ref_costs_single,
                                      unsigned int *ref_costs_comp,
@@ -2004,47 +2004,47 @@ static void estimate_ref_frame_costs(const VP10_COMMON *cm,
     memset(ref_costs_comp, 0, MAX_REF_FRAMES * sizeof(*ref_costs_comp));
     *comp_mode_p = 128;
   } else {
-    aom_prob intra_inter_p = vp10_get_intra_inter_prob(cm, xd);
+    aom_prob intra_inter_p = av1_get_intra_inter_prob(cm, xd);
     aom_prob comp_inter_p = 128;
 
     if (cm->reference_mode == REFERENCE_MODE_SELECT) {
-      comp_inter_p = vp10_get_reference_mode_prob(cm, xd);
+      comp_inter_p = av1_get_reference_mode_prob(cm, xd);
       *comp_mode_p = comp_inter_p;
     } else {
       *comp_mode_p = 128;
     }
 
-    ref_costs_single[INTRA_FRAME] = vp10_cost_bit(intra_inter_p, 0);
+    ref_costs_single[INTRA_FRAME] = av1_cost_bit(intra_inter_p, 0);
 
     if (cm->reference_mode != COMPOUND_REFERENCE) {
-      aom_prob ref_single_p1 = vp10_get_pred_prob_single_ref_p1(cm, xd);
-      aom_prob ref_single_p2 = vp10_get_pred_prob_single_ref_p2(cm, xd);
-      unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+      aom_prob ref_single_p1 = av1_get_pred_prob_single_ref_p1(cm, xd);
+      aom_prob ref_single_p2 = av1_get_pred_prob_single_ref_p2(cm, xd);
+      unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
 
       if (cm->reference_mode == REFERENCE_MODE_SELECT)
-        base_cost += vp10_cost_bit(comp_inter_p, 0);
+        base_cost += av1_cost_bit(comp_inter_p, 0);
 
       ref_costs_single[LAST_FRAME] = ref_costs_single[GOLDEN_FRAME] =
           ref_costs_single[ALTREF_FRAME] = base_cost;
-      ref_costs_single[LAST_FRAME] += vp10_cost_bit(ref_single_p1, 0);
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p1, 1);
-      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p1, 1);
-      ref_costs_single[GOLDEN_FRAME] += vp10_cost_bit(ref_single_p2, 0);
-      ref_costs_single[ALTREF_FRAME] += vp10_cost_bit(ref_single_p2, 1);
+      ref_costs_single[LAST_FRAME] += av1_cost_bit(ref_single_p1, 0);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p1, 1);
+      ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p1, 1);
+      ref_costs_single[GOLDEN_FRAME] += av1_cost_bit(ref_single_p2, 0);
+      ref_costs_single[ALTREF_FRAME] += av1_cost_bit(ref_single_p2, 1);
     } else {
       ref_costs_single[LAST_FRAME] = 512;
       ref_costs_single[GOLDEN_FRAME] = 512;
       ref_costs_single[ALTREF_FRAME] = 512;
     }
     if (cm->reference_mode != SINGLE_REFERENCE) {
-      aom_prob ref_comp_p = vp10_get_pred_prob_comp_ref_p(cm, xd);
-      unsigned int base_cost = vp10_cost_bit(intra_inter_p, 1);
+      aom_prob ref_comp_p = av1_get_pred_prob_comp_ref_p(cm, xd);
+      unsigned int base_cost = av1_cost_bit(intra_inter_p, 1);
 
       if (cm->reference_mode == REFERENCE_MODE_SELECT)
-        base_cost += vp10_cost_bit(comp_inter_p, 1);
+        base_cost += av1_cost_bit(comp_inter_p, 1);
 
-      ref_costs_comp[LAST_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 0);
-      ref_costs_comp[GOLDEN_FRAME] = base_cost + vp10_cost_bit(ref_comp_p, 1);
+      ref_costs_comp[LAST_FRAME] = base_cost + av1_cost_bit(ref_comp_p, 0);
+      ref_costs_comp[GOLDEN_FRAME] = base_cost + av1_cost_bit(ref_comp_p, 1);
     } else {
       ref_costs_comp[LAST_FRAME] = 512;
       ref_costs_comp[GOLDEN_FRAME] = 512;
@@ -2073,13 +2073,13 @@ static void store_coding_context(
          sizeof(*best_filter_diff) * SWITCHABLE_FILTER_CONTEXTS);
 }
 
-static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
+static void setup_buffer_inter(AV1_COMP *cpi, MACROBLOCK *x,
                                MV_REFERENCE_FRAME ref_frame,
                                BLOCK_SIZE block_size, int mi_row, int mi_col,
                                int_mv frame_nearest_mv[MAX_REF_FRAMES],
                                int_mv frame_near_mv[MAX_REF_FRAMES],
                                struct buf_2d yv12_mb[4][MAX_MB_PLANE]) {
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   const YV12_BUFFER_CONFIG *yv12 = get_ref_frame_buffer(cpi, ref_frame);
   MACROBLOCKD *const xd = &x->e_mbd;
   MODE_INFO *const mi = xd->mi[0];
@@ -2091,30 +2091,30 @@ static void setup_buffer_inter(VP10_COMP *cpi, MACROBLOCK *x,
 
   // TODO(jkoleszar): Is the UV buffer ever used here? If so, need to make this
   // use the UV scaling factors.
-  vp10_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
+  av1_setup_pred_block(xd, yv12_mb[ref_frame], yv12, mi_row, mi_col, sf, sf);
 
   // Gets an initial list of candidate vectors from neighbours and orders them
-  vp10_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, NULL,
+  av1_find_mv_refs(cm, xd, mi, ref_frame, candidates, mi_row, mi_col, NULL,
                     NULL, mbmi_ext->mode_context);
 
   // Candidate refinement carried out at encoder and decoder
-  vp10_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
+  av1_find_best_ref_mvs(cm->allow_high_precision_mv, candidates,
                          &frame_nearest_mv[ref_frame],
                          &frame_near_mv[ref_frame]);
 
   // Further refinement that is encode side only to test the top few candidates
   // in full and choose the best as the centre point for subsequent searches.
   // The current implementation doesn't support scaling.
-  if (!vp10_is_scaled(sf) && block_size >= BLOCK_8X8)
-    vp10_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
+  if (!av1_is_scaled(sf) && block_size >= BLOCK_8X8)
+    av1_mv_pred(cpi, x, yv12_mb[ref_frame][0].buf, yv12->y_stride, ref_frame,
                  block_size);
 }
 
-static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
+static void single_motion_search(AV1_COMP *cpi, MACROBLOCK *x,
                                  BLOCK_SIZE bsize, int mi_row, int mi_col,
                                  int_mv *tmp_mv, int *rate_mv) {
   MACROBLOCKD *xd = &x->e_mbd;
-  const VP10_COMMON *cm = &cpi->common;
+  const AV1_COMMON *cm = &cpi->common;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   struct buf_2d backup_yv12[MAX_MB_PLANE] = { { 0, 0 } };
   int bestsme = INT_MAX;
@@ -2131,7 +2131,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   int cost_list[5];
 
   const YV12_BUFFER_CONFIG *scaled_ref_frame =
-      vp10_get_scaled_ref_frame(cpi, ref);
+      av1_get_scaled_ref_frame(cpi, ref);
 
   MV pred_mv[3];
   pred_mv[0] = x->mbmi_ext->ref_mvs[ref][0].as_mv;
@@ -2145,10 +2145,10 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     // motion search code to be used without additional modifications.
     for (i = 0; i < MAX_MB_PLANE; i++) backup_yv12[i] = xd->plane[i].pre[0];
 
-    vp10_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
+    av1_setup_pre_planes(xd, 0, scaled_ref_frame, mi_row, mi_col, NULL);
   }
 
-  vp10_set_mv_search_range(x, &ref_mv);
+  av1_set_mv_search_range(x, &ref_mv);
 
   // Work out the size of the first step in the mv step search.
   // 0 here is maximum length first step. 1 is VPXMAX >> 1 etc.
@@ -2157,7 +2157,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
     // max mv magnitude and that based on the best ref mvs of the current
     // block for the given reference.
     step_param =
-        (vp10_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
+        (av1_init_search_range(x->max_mv_context[ref]) + cpi->mv_step_param) /
         2;
   } else {
     step_param = cpi->mv_step_param;
@@ -2202,7 +2202,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
   mvp_full.col >>= 3;
   mvp_full.row >>= 3;
 
-  bestsme = vp10_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
+  bestsme = av1_full_pixel_search(cpi, x, bsize, &mvp_full, step_param, sadpb,
                                    cond_cost_list(cpi, cost_list), &ref_mv,
                                    &tmp_mv->as_mv, INT_MAX, 1);
 
@@ -2219,7 +2219,7 @@ static void single_motion_search(VP10_COMP *cpi, MACROBLOCK *x,
         cpi->sf.mv.subpel_iters_per_step, cond_cost_list(cpi, cost_list),
         x->nmvjointcost, x->mvcost, &dis, &x->pred_sse[ref], NULL, 0, 0);
   }
-  *rate_mv = vp10_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
+  *rate_mv = av1_mv_bit_cost(&tmp_mv->as_mv, &ref_mv, x->nmvjointcost,
                               x->mvcost, MV_COST_WEIGHT);
 
   if (cpi->sf.adaptive_motion_search) x->pred_mv[ref] = tmp_mv->as_mv;
@@ -2247,7 +2247,7 @@ static INLINE void restore_dst_buf(MACROBLOCKD *xd,
 // However, once established that vector may be usable through the nearest and
 // near mv modes to reduce distortion in subsequent blocks and also improve
 // visual quality.
-static int discount_newmv_test(const VP10_COMP *cpi, int this_mode,
+static int discount_newmv_test(const AV1_COMP *cpi, int this_mode,
                                int_mv this_mv,
                                int_mv (*mode_mv)[MAX_REF_FRAMES],
                                int ref_frame) {
@@ -2272,14 +2272,14 @@ static INLINE void clamp_mv2(MV *mv, const MACROBLOCKD *xd) {
 }
 
 static int64_t handle_inter_mode(
-    VP10_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
+    AV1_COMP *cpi, MACROBLOCK *x, BLOCK_SIZE bsize, int *rate2,
     int64_t *distortion, int *skippable, int *rate_y, int *rate_uv,
     int *disable_skip, int_mv (*mode_mv)[MAX_REF_FRAMES], int mi_row,
     int mi_col, int_mv single_newmv[MAX_REF_FRAMES],
     INTERP_FILTER (*single_filter)[MAX_REF_FRAMES],
     int (*single_skippable)[MAX_REF_FRAMES], int64_t *psse,
     const int64_t ref_best_rd, int64_t *mask_filter, int64_t filter_cache[]) {
-  VP10_COMMON *cm = &cpi->common;
+  AV1_COMMON *cm = &cpi->common;
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   MB_MODE_INFO_EXT *const mbmi_ext = x->mbmi_ext;
@@ -2290,12 +2290,12 @@ static int64_t handle_inter_mode(
   int refs[2] = { mbmi->ref_frame[0],
                   (mbmi->ref_frame[1] < 0 ? 0 : mbmi->ref_frame[1]) };
   int_mv cur_mv[2];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, tmp_buf16[MAX_MB_PLANE * 64 * 64]);
   uint8_t *tmp_buf;
 #else
   DECLARE_ALIGNED(16, uint8_t, tmp_buf[MAX_MB_PLANE * 64 * 64]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int pred_exists = 0;
   int intpel_mv;
   int64_t rd, tmp_rd, best_rd = INT64_MAX;
@@ -2319,13 +2319,13 @@ static int64_t handle_inter_mode(
   int64_t skip_sse_sb = INT64_MAX;
   int64_t distortion_y = 0, distortion_uv = 0;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     tmp_buf = CONVERT_TO_BYTEPTR(tmp_buf16);
   } else {
     tmp_buf = (uint8_t *)tmp_buf16;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if (pred_filter_search) {
     INTERP_FILTER af = SWITCHABLE, lf = SWITCHABLE;
@@ -2358,10 +2358,10 @@ static int64_t handle_inter_mode(
         joint_motion_search(cpi, x, bsize, frame_mv, mi_row, mi_col,
                             single_newmv, &rate_mv);
       } else {
-        rate_mv = vp10_mv_bit_cost(&frame_mv[refs[0]].as_mv,
+        rate_mv = av1_mv_bit_cost(&frame_mv[refs[0]].as_mv,
                                    &x->mbmi_ext->ref_mvs[refs[0]][0].as_mv,
                                    x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
-        rate_mv += vp10_mv_bit_cost(&frame_mv[refs[1]].as_mv,
+        rate_mv += av1_mv_bit_cost(&frame_mv[refs[1]].as_mv,
                                     &x->mbmi_ext->ref_mvs[refs[1]][0].as_mv,
                                     x->nmvjointcost, x->mvcost, MV_COST_WEIGHT);
       }
@@ -2450,7 +2450,7 @@ static int64_t handle_inter_mode(
         int64_t tmp_skip_sse = INT64_MAX;
 
         mbmi->interp_filter = i;
-        rs = vp10_get_switchable_rate(cpi, xd);
+        rs = av1_get_switchable_rate(cpi, xd);
         rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
 
         if (i > 0 && intpel_mv) {
@@ -2481,7 +2481,7 @@ static int64_t handle_inter_mode(
               xd->plane[j].dst.stride = 64;
             }
           }
-          vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+          av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
           model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum, &tmp_skip_sb,
                           &tmp_skip_sse);
 
@@ -2531,7 +2531,7 @@ static int64_t handle_inter_mode(
   // Set the appropriate filter
   mbmi->interp_filter =
       cm->interp_filter != SWITCHABLE ? cm->interp_filter : best_filter;
-  rs = cm->interp_filter == SWITCHABLE ? vp10_get_switchable_rate(cpi, xd) : 0;
+  rs = cm->interp_filter == SWITCHABLE ? av1_get_switchable_rate(cpi, xd) : 0;
 
   if (pred_exists) {
     if (best_needs_copy) {
@@ -2548,7 +2548,7 @@ static int64_t handle_inter_mode(
     // Handles the special case when a filter that is not in the
     // switchable list (ex. bilinear) is indicated at the frame level, or
     // skip condition holds.
-    vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+    av1_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
     model_rd_for_sb(cpi, bsize, x, xd, &tmp_rate, &tmp_dist, &skip_txfm_sb,
                     &skip_sse_sb);
     rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate, tmp_dist);
@@ -2584,7 +2584,7 @@ static int64_t handle_inter_mode(
     int64_t rdcosty = INT64_MAX;
 
     // Y cost and distortion
-    vp10_subtract_plane(x, bsize, 0);
+    av1_subtract_plane(x, bsize, 0);
     super_block_yrd(cpi, x, rate_y, &distortion_y, &skippable_y, psse, bsize,
                     ref_best_rd);
 
@@ -2618,7 +2618,7 @@ static int64_t handle_inter_mode(
     *disable_skip = 1;
 
     // The cost of skip bit needs to be added.
-    *rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+    *rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
 
     *distortion = skip_sse_sb;
   }
@@ -2629,10 +2629,10 @@ static int64_t handle_inter_mode(
   return 0;  // The rate-distortion cost will be re-calculated by caller.
 }
 
-void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
+void av1_rd_pick_intra_mode_sb(AV1_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
                                 BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
                                 int64_t best_rd) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblockd_plane *const pd = xd->plane;
   int rate_y = 0, rate_uv = 0, rate_y_tokenonly = 0, rate_uv_tokenonly = 0;
@@ -2664,11 +2664,11 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
 
   if (y_skip && uv_skip) {
     rd_cost->rate = rate_y + rate_uv - rate_y_tokenonly - rate_uv_tokenonly +
-                    vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+                    av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
     rd_cost->dist = dist_y + dist_uv;
   } else {
     rd_cost->rate =
-        rate_y + rate_uv + vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        rate_y + rate_uv + av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
     rd_cost->dist = dist_y + dist_uv;
   }
 
@@ -2682,7 +2682,7 @@ void vp10_rd_pick_intra_mode_sb(VP10_COMP *cpi, MACROBLOCK *x, RD_COST *rd_cost,
 #define LOW_VAR_THRESH 16
 #define VLOW_ADJ_MAX 25
 #define VHIGH_ADJ_MAX 8
-static void rd_variance_adjustment(VP10_COMP *cpi, MACROBLOCK *x,
+static void rd_variance_adjustment(AV1_COMP *cpi, MACROBLOCK *x,
                                    BLOCK_SIZE bsize, int64_t *this_rd,
                                    MV_REFERENCE_FRAME ref_frame,
                                    unsigned int source_variance) {
@@ -2694,18 +2694,18 @@ static void rd_variance_adjustment(VP10_COMP *cpi, MACROBLOCK *x,
 
   if (*this_rd == INT64_MAX) return;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    recon_variance = vp10_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
+    recon_variance = av1_high_get_sby_perpixel_variance(cpi, &xd->plane[0].dst,
                                                          bsize, xd->bd);
   } else {
     recon_variance =
-        vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+        av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
   }
 #else
   recon_variance =
-      vp10_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+      av1_get_sby_perpixel_variance(cpi, &xd->plane[0].dst, bsize);
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   if ((source_variance + recon_variance) > LOW_VAR_THRESH) {
     absvar_diff = (source_variance > recon_variance)
@@ -2734,7 +2734,7 @@ static void rd_variance_adjustment(VP10_COMP *cpi, MACROBLOCK *x,
 }
 
 // Do we have an internal image edge (e.g. formatting bars).
-int vp10_internal_image_edge(VP10_COMP *cpi) {
+int av1_internal_image_edge(AV1_COMP *cpi) {
   return (cpi->oxcf.pass == 2) &&
          ((cpi->twopass.this_frame_stats.inactive_zone_rows > 0) ||
           (cpi->twopass.this_frame_stats.inactive_zone_cols > 0));
@@ -2743,7 +2743,7 @@ int vp10_internal_image_edge(VP10_COMP *cpi) {
 // Checks to see if a super block is on a horizontal image edge.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
+int av1_active_h_edge(AV1_COMP *cpi, int mi_row, int mi_step) {
   int top_edge = 0;
   int bottom_edge = cpi->common.mi_rows;
   int is_active_h_edge = 0;
@@ -2770,7 +2770,7 @@ int vp10_active_h_edge(VP10_COMP *cpi, int mi_row, int mi_step) {
 // Checks to see if a super block is on a vertical image edge.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
+int av1_active_v_edge(AV1_COMP *cpi, int mi_col, int mi_step) {
   int left_edge = 0;
   int right_edge = cpi->common.mi_cols;
   int is_active_v_edge = 0;
@@ -2797,17 +2797,17 @@ int vp10_active_v_edge(VP10_COMP *cpi, int mi_col, int mi_step) {
 // Checks to see if a super block is at the edge of the active image.
 // In most cases this is the "real" edge unless there are formatting
 // bars embedded in the stream.
-int vp10_active_edge_sb(VP10_COMP *cpi, int mi_row, int mi_col) {
-  return vp10_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
-         vp10_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
+int av1_active_edge_sb(AV1_COMP *cpi, int mi_row, int mi_col) {
+  return av1_active_h_edge(cpi, mi_row, MI_BLOCK_SIZE) ||
+         av1_active_v_edge(cpi, mi_col, MI_BLOCK_SIZE);
 }
 
-void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sb(AV1_COMP *cpi, TileDataEnc *tile_data,
                                 MACROBLOCK *x, int mi_row, int mi_col,
                                 RD_COST *rd_cost, BLOCK_SIZE bsize,
                                 PICK_MODE_CONTEXT *ctx,
                                 int64_t best_rd_so_far) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RD_OPT *const rd_opt = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -2842,7 +2842,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
   int64_t dist_uv[TX_SIZES];
   int skip_uv[TX_SIZES];
   PREDICTION_MODE mode_uv[TX_SIZES];
-  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+  const int intra_cost_penalty = av1_get_intra_cost_penalty(
       cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
   int best_skip2 = 0;
   uint8_t ref_frame_skip_mask[2] = { 0 };
@@ -2856,7 +2856,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
   int64_t mask_filter = 0;
   int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
 
-  vp10_zero(best_mbmode);
+  av1_zero(best_mbmode);
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
 
@@ -2991,9 +2991,9 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
     int64_t total_sse = INT64_MAX;
     int early_term = 0;
 
-    this_mode = vp10_mode_order[mode_index].mode;
-    ref_frame = vp10_mode_order[mode_index].ref_frame[0];
-    second_ref_frame = vp10_mode_order[mode_index].ref_frame[1];
+    this_mode = av1_mode_order[mode_index].mode;
+    ref_frame = av1_mode_order[mode_index].ref_frame[0];
+    second_ref_frame = av1_mode_order[mode_index].ref_frame[1];
 
     // Look at the reference frame of the best mode so far and set the
     // skip mask to look at a subset of the remaining modes.
@@ -3130,7 +3130,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
           &mask_filter, filter_cache);
       if (this_rd == INT64_MAX) continue;
 
-      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+      compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
 
       if (cm->reference_mode == REFERENCE_MODE_SELECT) rate2 += compmode_cost;
     }
@@ -3149,15 +3149,15 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
         rate2 -= (rate_y + rate_uv);
 
         // Cost the skip mb case
-        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+        rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
       } else if (ref_frame != INTRA_FRAME && !xd->lossless[mbmi->segment_id]) {
         if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
             RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
           // Add in the cost of the no skip flag.
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
         } else {
           // FIXME(rbultje) make this work for splitmv also
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
           distortion2 = total_sse;
           assert(total_sse >= 0);
           rate2 -= (rate_y + rate_uv);
@@ -3165,7 +3165,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
         }
       } else {
         // Add in the cost of the no skip flag.
-        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
       }
 
       // Calculate the final RD estimate for this mode.
@@ -3226,11 +3226,11 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
           int qstep = xd->plane[0].dequant[1];
           // TODO(debargha): Enhance this by specializing for each mode_index
           int scale = 4;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             qstep >>= (xd->bd - 8);
           }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
           if (x->source_variance < UINT_MAX) {
             const int var_adjust = (x->source_variance < 16);
             scale -= var_adjust;
@@ -3348,7 +3348,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
          !is_inter_block(&best_mbmode));
 
   if (!cpi->rc.is_src_frame_alt_ref)
-    vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
+    av1_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                                sf->adaptive_rd_thresh, bsize, best_mode_index);
 
   // macroblock modes
@@ -3372,7 +3372,7 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
     if (cm->interp_filter == SWITCHABLE)
       assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
   } else {
-    vp10_zero(best_filter_diff);
+    av1_zero(best_filter_diff);
   }
 
   // TODO(yunqingwang): Moving this line in front of the above best_filter_diff
@@ -3385,12 +3385,12 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
     int max_plane = is_inter_block(&xd->mi[0]->mbmi) ? MAX_MB_PLANE : 1;
     for (plane = 0; plane < max_plane; ++plane) {
       x->plane[plane].eobs = ctx->eobs_pbuf[plane][1];
-      has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
+      has_high_freq_coeff |= av1_has_high_freq_in_plane(x, bsize, plane);
     }
 
     for (plane = max_plane; plane < MAX_MB_PLANE; ++plane) {
       x->plane[plane].eobs = ctx->eobs_pbuf[plane][2];
-      has_high_freq_coeff |= vp10_has_high_freq_in_plane(x, bsize, plane);
+      has_high_freq_coeff |= av1_has_high_freq_in_plane(x, bsize, plane);
     }
 
     best_mode_skippable |= !has_high_freq_coeff;
@@ -3402,12 +3402,12 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi, TileDataEnc *tile_data,
                        best_filter_diff, best_mode_skippable);
 }
 
-void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sb_seg_skip(AV1_COMP *cpi, TileDataEnc *tile_data,
                                          MACROBLOCK *x, RD_COST *rd_cost,
                                          BLOCK_SIZE bsize,
                                          PICK_MODE_CONTEXT *ctx,
                                          int64_t best_rd_so_far) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
   unsigned char segment_id = mbmi->segment_id;
@@ -3447,7 +3447,7 @@ void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
       int best_rs = INT_MAX;
       for (i = 0; i < SWITCHABLE_FILTERS; ++i) {
         mbmi->interp_filter = i;
-        rs = vp10_get_switchable_rate(cpi, xd);
+        rs = av1_get_switchable_rate(cpi, xd);
         if (rs < best_rs) {
           best_rs = rs;
           best_filter = mbmi->interp_filter;
@@ -3458,13 +3458,13 @@ void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
   // Set the appropriate filter
   if (cm->interp_filter == SWITCHABLE) {
     mbmi->interp_filter = best_filter;
-    rate2 += vp10_get_switchable_rate(cpi, xd);
+    rate2 += av1_get_switchable_rate(cpi, xd);
   } else {
     mbmi->interp_filter = cm->interp_filter;
   }
 
   if (cm->reference_mode == REFERENCE_MODE_SELECT)
-    rate2 += vp10_cost_bit(comp_mode_p, comp_pred);
+    rate2 += av1_cost_bit(comp_mode_p, comp_pred);
 
   // Estimate the reference frame signaling cost and add it
   // to the rolling cost variable.
@@ -3484,22 +3484,22 @@ void vp10_rd_pick_inter_mode_sb_seg_skip(VP10_COMP *cpi, TileDataEnc *tile_data,
   assert((cm->interp_filter == SWITCHABLE) ||
          (cm->interp_filter == mbmi->interp_filter));
 
-  vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
+  av1_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                              cpi->sf.adaptive_rd_thresh, bsize, THR_ZEROMV);
 
-  vp10_zero(best_pred_diff);
-  vp10_zero(best_filter_diff);
+  av1_zero(best_pred_diff);
+  av1_zero(best_filter_diff);
 
   if (!x->select_tx_size) swap_block_ptr(x, ctx, 1, 0, 0, MAX_MB_PLANE);
   store_coding_context(x, ctx, THR_ZEROMV, best_pred_diff, best_filter_diff, 0);
 }
 
-void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
+void av1_rd_pick_inter_mode_sub8x8(AV1_COMP *cpi, TileDataEnc *tile_data,
                                     MACROBLOCK *x, int mi_row, int mi_col,
                                     RD_COST *rd_cost, BLOCK_SIZE bsize,
                                     PICK_MODE_CONTEXT *ctx,
                                     int64_t best_rd_so_far) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   RD_OPT *const rd_opt = &cpi->rd;
   SPEED_FEATURES *const sf = &cpi->sf;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -3527,7 +3527,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
   int64_t dist_uv;
   int skip_uv;
   PREDICTION_MODE mode_uv = DC_PRED;
-  const int intra_cost_penalty = vp10_get_intra_cost_penalty(
+  const int intra_cost_penalty = av1_get_intra_cost_penalty(
       cm->base_qindex, cm->y_dc_delta_q, cm->bit_depth);
   int_mv seg_mvs[4][MAX_REF_FRAMES];
   b_mode_info best_bmodes[4];
@@ -3536,10 +3536,10 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
   int64_t mask_filter = 0;
   int64_t filter_cache[SWITCHABLE_FILTER_CONTEXTS];
   int internal_active_edge =
-      vp10_active_edge_sb(cpi, mi_row, mi_col) && vp10_internal_image_edge(cpi);
+      av1_active_edge_sb(cpi, mi_row, mi_col) && av1_internal_image_edge(cpi);
 
   memset(x->zcoeff_blk[TX_4X4], 0, 4);
-  vp10_zero(best_mbmode);
+  av1_zero(best_mbmode);
 
   for (i = 0; i < SWITCHABLE_FILTER_CONTEXTS; ++i) filter_cache[i] = INT64_MAX;
 
@@ -3583,8 +3583,8 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
     int64_t total_sse = INT_MAX;
     int early_term = 0;
 
-    ref_frame = vp10_ref_order[ref_index].ref_frame[0];
-    second_ref_frame = vp10_ref_order[ref_index].ref_frame[1];
+    ref_frame = av1_ref_order[ref_index].ref_frame[0];
+    second_ref_frame = av1_ref_order[ref_index].ref_frame[1];
 
     // Look at the reference frame of the best mode so far and set the
     // skip mask to look at a subset of the remaining modes.
@@ -3636,11 +3636,11 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
     // TODO(jingning, jkoleszar): scaling reference frame not supported for
     // sub8x8 blocks.
     if (ref_frame > INTRA_FRAME &&
-        vp10_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
+        av1_is_scaled(&cm->frame_refs[ref_frame - 1].sf))
       continue;
 
     if (second_ref_frame > INTRA_FRAME &&
-        vp10_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
+        av1_is_scaled(&cm->frame_refs[second_ref_frame - 1].sf))
       continue;
 
     if (comp_pred)
@@ -3752,7 +3752,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
                 mi_row, mi_col);
 
             if (tmp_rd == INT64_MAX) continue;
-            rs = vp10_get_switchable_rate(cpi, xd);
+            rs = av1_get_switchable_rate(cpi, xd);
             rs_rd = RDCOST(x->rdmult, x->rddiv, rs, 0);
             filter_cache[switchable_filter_index] = tmp_rd;
             filter_cache[SWITCHABLE_FILTERS] =
@@ -3823,13 +3823,13 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
       distortion2 += distortion;
 
       if (cm->interp_filter == SWITCHABLE)
-        rate2 += vp10_get_switchable_rate(cpi, xd);
+        rate2 += av1_get_switchable_rate(cpi, xd);
 
       if (!mode_excluded)
         mode_excluded = comp_pred ? cm->reference_mode == SINGLE_REFERENCE
                                   : cm->reference_mode == COMPOUND_REFERENCE;
 
-      compmode_cost = vp10_cost_bit(comp_mode_p, comp_pred);
+      compmode_cost = av1_cost_bit(comp_mode_p, comp_pred);
 
       tmp_best_rdu =
           best_rd - VPXMIN(RDCOST(x->rdmult, x->rddiv, rate2, distortion2),
@@ -3838,7 +3838,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
       if (tmp_best_rdu > 0) {
         // If even the 'Y' rd value of split is higher than best so far
         // then don't bother looking at UV
-        vp10_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
+        av1_build_inter_predictors_sbuv(&x->e_mbd, mi_row, mi_col, BLOCK_8X8);
         memset(x->skip_txfm, SKIP_TXFM_NONE, sizeof(x->skip_txfm));
         if (!super_block_uvrd(cpi, x, &rate_uv, &distortion_uv, &uv_skippable,
                               &uv_sse, BLOCK_8X8, tmp_best_rdu))
@@ -3869,10 +3869,10 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
         if (RDCOST(x->rdmult, x->rddiv, rate_y + rate_uv, distortion2) <
             RDCOST(x->rdmult, x->rddiv, 0, total_sse)) {
           // Add in the cost of the no skip flag.
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
         } else {
           // FIXME(rbultje) make this work for splitmv also
-          rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 1);
+          rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 1);
           distortion2 = total_sse;
           assert(total_sse >= 0);
           rate2 -= (rate_y + rate_uv);
@@ -3882,7 +3882,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
         }
       } else {
         // Add in the cost of the no skip flag.
-        rate2 += vp10_cost_bit(vp10_get_skip_prob(cm, xd), 0);
+        rate2 += av1_cost_bit(av1_get_skip_prob(cm, xd), 0);
       }
 
       // Calculate the final RD estimate for this mode.
@@ -3930,11 +3930,11 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
           int qstep = xd->plane[0].dequant[1];
           // TODO(debargha): Enhance this by specializing for each mode_index
           int scale = 4;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             qstep >>= (xd->bd - 8);
           }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
           if (x->source_variance < UINT_MAX) {
             const int var_adjust = (x->source_variance < 16);
             scale -= var_adjust;
@@ -4026,7 +4026,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
          (cm->interp_filter == best_mbmode.interp_filter) ||
          !is_inter_block(&best_mbmode));
 
-  vp10_update_rd_thresh_fact(tile_data->thresh_freq_fact,
+  av1_update_rd_thresh_fact(tile_data->thresh_freq_fact,
                              sf->adaptive_rd_thresh, bsize, best_ref_index);
 
   // macroblock modes
@@ -4059,7 +4059,7 @@ void vp10_rd_pick_inter_mode_sub8x8(VP10_COMP *cpi, TileDataEnc *tile_data,
     if (cm->interp_filter == SWITCHABLE)
       assert(best_filter_diff[SWITCHABLE_FILTERS] == 0);
   } else {
-    vp10_zero(best_filter_diff);
+    av1_zero(best_filter_diff);
   }
 
   store_coding_context(x, ctx, best_ref_index, best_pred_diff, best_filter_diff,
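
The rdopt.c hunks above repeatedly trade the cost of coding coefficients plus a "no skip" flag against dropping the coefficients, taking the full SSE as distortion, and paying for a "skip" flag instead. A minimal standalone C sketch of that decision follows; the bit_cost() and rd() helpers here are simplified stand-ins for the library's fixed-point av1_cost_bit()/RDCOST arithmetic, not the actual implementation.

#include <math.h>

/* Approximate cost, in bits, of coding one binary symbol whose probability
 * of being zero is p_zero (0 < p_zero < 1). The encoder uses fixed-point
 * av1_cost_zero()/av1_cost_one() tables rather than log2(). */
static double bit_cost(double p_zero, int bit) {
  return -log2(bit ? 1.0 - p_zero : p_zero);
}

/* Simplified rate-distortion cost: lambda-weighted rate plus distortion. */
static double rd(double lambda, double rate_bits, double dist) {
  return lambda * rate_bits + dist;
}

/* Either keep the coded coefficients and signal "no skip" (flag = 0), or
 * drop them, take the full SSE as distortion, and signal "skip" (flag = 1),
 * whichever gives the lower RD cost. */
static double skip_tradeoff(double lambda, double p_noskip,
                            double coeff_rate_bits, double coeff_dist,
                            double total_sse, int *use_skip) {
  const double rd_code =
      rd(lambda, coeff_rate_bits + bit_cost(p_noskip, 0), coeff_dist);
  const double rd_skip = rd(lambda, bit_cost(p_noskip, 1), total_sse);
  *use_skip = rd_skip < rd_code;
  return *use_skip ? rd_skip : rd_code;
}
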
diff --git a/av1/encoder/rdopt.h b/av1/encoder/rdopt.h
index e07aebabef445994a982a93768b35c5f4287a162..6097b42d515ad5bf4e18759b649f1a7cb900c32e 100644
--- a/av1/encoder/rdopt.h
+++ b/av1/encoder/rdopt.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_RDOPT_H_
-#define VP10_ENCODER_RDOPT_H_
+#ifndef AV1_ENCODER_RDOPT_H_
+#define AV1_ENCODER_RDOPT_H_
 
 #include "av1/common/blockd.h"
 
@@ -22,40 +22,40 @@ extern "C" {
 #endif
 
 struct TileInfo;
-struct VP10_COMP;
+struct AV1_COMP;
 struct macroblock;
 struct RD_COST;
 
-void vp10_rd_pick_intra_mode_sb(struct VP10_COMP *cpi, struct macroblock *x,
+void av1_rd_pick_intra_mode_sb(struct AV1_COMP *cpi, struct macroblock *x,
                                 struct RD_COST *rd_cost, BLOCK_SIZE bsize,
                                 PICK_MODE_CONTEXT *ctx, int64_t best_rd);
 
-unsigned int vp10_get_sby_perpixel_variance(VP10_COMP *cpi,
+unsigned int av1_get_sby_perpixel_variance(AV1_COMP *cpi,
                                             const struct buf_2d *ref,
                                             BLOCK_SIZE bs);
-#if CONFIG_VPX_HIGHBITDEPTH
-unsigned int vp10_high_get_sby_perpixel_variance(VP10_COMP *cpi,
+#if CONFIG_AOM_HIGHBITDEPTH
+unsigned int av1_high_get_sby_perpixel_variance(AV1_COMP *cpi,
                                                  const struct buf_2d *ref,
                                                  BLOCK_SIZE bs, int bd);
 #endif
 
-void vp10_rd_pick_inter_mode_sb(struct VP10_COMP *cpi,
+void av1_rd_pick_inter_mode_sb(struct AV1_COMP *cpi,
                                 struct TileDataEnc *tile_data,
                                 struct macroblock *x, int mi_row, int mi_col,
                                 struct RD_COST *rd_cost, BLOCK_SIZE bsize,
                                 PICK_MODE_CONTEXT *ctx, int64_t best_rd_so_far);
 
-void vp10_rd_pick_inter_mode_sb_seg_skip(
-    struct VP10_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
+void av1_rd_pick_inter_mode_sb_seg_skip(
+    struct AV1_COMP *cpi, struct TileDataEnc *tile_data, struct macroblock *x,
     struct RD_COST *rd_cost, BLOCK_SIZE bsize, PICK_MODE_CONTEXT *ctx,
     int64_t best_rd_so_far);
 
-int vp10_internal_image_edge(struct VP10_COMP *cpi);
-int vp10_active_h_edge(struct VP10_COMP *cpi, int mi_row, int mi_step);
-int vp10_active_v_edge(struct VP10_COMP *cpi, int mi_col, int mi_step);
-int vp10_active_edge_sb(struct VP10_COMP *cpi, int mi_row, int mi_col);
+int av1_internal_image_edge(struct AV1_COMP *cpi);
+int av1_active_h_edge(struct AV1_COMP *cpi, int mi_row, int mi_step);
+int av1_active_v_edge(struct AV1_COMP *cpi, int mi_col, int mi_step);
+int av1_active_edge_sb(struct AV1_COMP *cpi, int mi_row, int mi_col);
 
-void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
+void av1_rd_pick_inter_mode_sub8x8(struct AV1_COMP *cpi,
                                     struct TileDataEnc *tile_data,
                                     struct macroblock *x, int mi_row,
                                     int mi_col, struct RD_COST *rd_cost,
@@ -66,4 +66,4 @@ void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RDOPT_H_
+#endif  // AV1_ENCODER_RDOPT_H_
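
The av1_active_h_edge()/av1_active_v_edge() prototypes above report whether a superblock row or column sits on the boundary of the active image region, which can be inset from the frame when formatting bars are present. A hedged sketch of the underlying straddle test follows; how the boundaries are pulled in from two-pass inactive-zone stats is deliberately left out.

/* A block of mi_step units starting at pos is "on" a boundary if it spans
 * that boundary. top_edge/bottom_edge are in mi units. */
static int straddles(int pos, int step, int boundary) {
  return pos <= boundary && pos + step > boundary;
}

static int active_h_edge_sketch(int mi_row, int mi_step, int top_edge,
                                int bottom_edge) {
  return straddles(mi_row, mi_step, top_edge) ||
         straddles(mi_row, mi_step, bottom_edge);
}
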
diff --git a/av1/encoder/resize.c b/av1/encoder/resize.c
index 18731d6a6aa0aa42bfa3a4d96fd63528e6337ef3..c5c5699dc98789ca62e38b08ebd83bfd01cd9573 100644
--- a/av1/encoder/resize.c
+++ b/av1/encoder/resize.c
@@ -16,9 +16,9 @@
 #include <stdlib.h>
 #include <string.h>
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #include "aom_dsp/aom_dsp_common.h"
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #include "aom_ports/mem.h"
 #include "av1/common/common.h"
 #include "av1/encoder/resize.h"
@@ -133,8 +133,8 @@ static const interp_kernel filteredinterp_filters1000[(1 << SUBPEL_BITS)] = {
 };
 
 // Filters for factor of 2 downsampling.
-static const int16_t vp10_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
-static const int16_t vp10_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
+static const int16_t av1_down2_symeven_half_filter[] = { 56, 12, -3, -1 };
+static const int16_t av1_down2_symodd_half_filter[] = { 64, 35, 0, -3 };
 
 static const interp_kernel *choose_interp_filter(int inlength, int outlength) {
   int outlength16 = outlength * 16;
@@ -240,8 +240,8 @@ static void interpolate(const uint8_t *const input, int inlength,
 static void down2_symeven(const uint8_t *const input, int length,
                           uint8_t *output) {
   // Actual filter len = 2 * filter_len_half.
-  const int16_t *filter = vp10_down2_symeven_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+  const int16_t *filter = av1_down2_symeven_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
   int i, j;
   uint8_t *optr = output;
   int l1 = filter_len_half;
@@ -296,8 +296,8 @@ static void down2_symeven(const uint8_t *const input, int length,
 static void down2_symodd(const uint8_t *const input, int length,
                          uint8_t *output) {
   // Actual filter len = 2 * filter_len_half - 1.
-  const int16_t *filter = vp10_down2_symodd_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+  const int16_t *filter = av1_down2_symodd_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
   int i, j;
   uint8_t *optr = output;
   int l1 = filter_len_half - 1;
@@ -426,7 +426,7 @@ static void fill_arr_to_col(uint8_t *img, int stride, int len, uint8_t *arr) {
   }
 }
 
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
+void av1_resize_plane(const uint8_t *const input, int height, int width,
                        int in_stride, uint8_t *output, int height2, int width2,
                        int out_stride) {
   int i;
@@ -451,7 +451,7 @@ void vp10_resize_plane(const uint8_t *const input, int height, int width,
   free(arrbuf);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_interpolate(const uint16_t *const input, int inlength,
                                uint16_t *output, int outlength, int bd) {
   const int64_t delta =
@@ -542,8 +542,8 @@ static void highbd_interpolate(const uint16_t *const input, int inlength,
 static void highbd_down2_symeven(const uint16_t *const input, int length,
                                  uint16_t *output, int bd) {
   // Actual filter len = 2 * filter_len_half.
-  static const int16_t *filter = vp10_down2_symeven_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symeven_half_filter) / 2;
+  static const int16_t *filter = av1_down2_symeven_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symeven_half_filter) / 2;
   int i, j;
   uint16_t *optr = output;
   int l1 = filter_len_half;
@@ -598,8 +598,8 @@ static void highbd_down2_symeven(const uint16_t *const input, int length,
 static void highbd_down2_symodd(const uint16_t *const input, int length,
                                 uint16_t *output, int bd) {
   // Actual filter len = 2 * filter_len_half - 1.
-  static const int16_t *filter = vp10_down2_symodd_half_filter;
-  const int filter_len_half = sizeof(vp10_down2_symodd_half_filter) / 2;
+  static const int16_t *filter = av1_down2_symodd_half_filter;
+  const int filter_len_half = sizeof(av1_down2_symodd_half_filter) / 2;
   int i, j;
   uint16_t *optr = output;
   int l1 = filter_len_half - 1;
@@ -715,7 +715,7 @@ static void highbd_fill_arr_to_col(uint16_t *img, int stride, int len,
   }
 }
 
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
                               int in_stride, uint8_t *output, int height2,
                               int width2, int out_stride, int bd) {
   int i;
@@ -738,84 +738,84 @@ void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
   free(tmpbuf);
   free(arrbuf);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
                           const uint8_t *const u, const uint8_t *const v,
                           int uv_stride, int height, int width, uint8_t *oy,
                           int oy_stride, uint8_t *ou, uint8_t *ov,
                           int ouv_stride, int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+  av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  av1_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
                     owidth / 2, ouv_stride);
-  vp10_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+  av1_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
                     owidth / 2, ouv_stride);
 }
 
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
                           const uint8_t *const u, const uint8_t *const v,
                           int uv_stride, int height, int width, uint8_t *oy,
                           int oy_stride, uint8_t *ou, uint8_t *ov,
                           int ouv_stride, int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
+  av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  av1_resize_plane(u, height, width / 2, uv_stride, ou, oheight, owidth / 2,
                     ouv_stride);
-  vp10_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
+  av1_resize_plane(v, height, width / 2, uv_stride, ov, oheight, owidth / 2,
                     ouv_stride);
 }
 
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
                           const uint8_t *const u, const uint8_t *const v,
                           int uv_stride, int height, int width, uint8_t *oy,
                           int oy_stride, uint8_t *ou, uint8_t *ov,
                           int ouv_stride, int oheight, int owidth) {
-  vp10_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
-  vp10_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+  av1_resize_plane(y, height, width, y_stride, oy, oheight, owidth, oy_stride);
+  av1_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
                     ouv_stride);
-  vp10_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+  av1_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
                     ouv_stride);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
                                  const uint8_t *const u, const uint8_t *const v,
                                  int uv_stride, int height, int width,
                                  uint8_t *oy, int oy_stride, uint8_t *ou,
                                  uint8_t *ov, int ouv_stride, int oheight,
                                  int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+  av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
                            oy_stride, bd);
-  vp10_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
+  av1_highbd_resize_plane(u, height / 2, width / 2, uv_stride, ou, oheight / 2,
                            owidth / 2, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
+  av1_highbd_resize_plane(v, height / 2, width / 2, uv_stride, ov, oheight / 2,
                            owidth / 2, ouv_stride, bd);
 }
 
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
                                  const uint8_t *const u, const uint8_t *const v,
                                  int uv_stride, int height, int width,
                                  uint8_t *oy, int oy_stride, uint8_t *ou,
                                  uint8_t *ov, int ouv_stride, int oheight,
                                  int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+  av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
                            oy_stride, bd);
-  vp10_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
+  av1_highbd_resize_plane(u, height, width / 2, uv_stride, ou, oheight,
                            owidth / 2, ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
+  av1_highbd_resize_plane(v, height, width / 2, uv_stride, ov, oheight,
                            owidth / 2, ouv_stride, bd);
 }
 
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
                                  const uint8_t *const u, const uint8_t *const v,
                                  int uv_stride, int height, int width,
                                  uint8_t *oy, int oy_stride, uint8_t *ou,
                                  uint8_t *ov, int ouv_stride, int oheight,
                                  int owidth, int bd) {
-  vp10_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
+  av1_highbd_resize_plane(y, height, width, y_stride, oy, oheight, owidth,
                            oy_stride, bd);
-  vp10_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
+  av1_highbd_resize_plane(u, height, width, uv_stride, ou, oheight, owidth,
                            ouv_stride, bd);
-  vp10_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
+  av1_highbd_resize_plane(v, height, width, uv_stride, ov, oheight, owidth,
                            ouv_stride, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
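
The av1_down2_symeven_half_filter / av1_down2_symodd_half_filter tables renamed above each store one half of a symmetric decimation kernel. A quick self-contained check that both kernels are DC-neutral; the 7-bit (sum = 128) filter precision is inferred from the tap values rather than quoted from this file.

#include <assert.h>
#include <stdint.h>

static const int16_t symeven_half[] = { 56, 12, -3, -1 };
static const int16_t symodd_half[] = { 64, 35, 0, -3 };

/* Even-length kernel: every tap is mirrored, so it appears twice. */
static int dc_gain_symeven(const int16_t *h, int n) {
  int sum = 0, i;
  for (i = 0; i < n; ++i) sum += h[i];
  return 2 * sum;
}

/* Odd-length kernel: the center tap h[0] appears once, the rest twice. */
static int dc_gain_symodd(const int16_t *h, int n) {
  int sum = h[0], i;
  for (i = 1; i < n; ++i) sum += 2 * h[i];
  return sum;
}

int main(void) {
  assert(dc_gain_symeven(symeven_half, 4) == 128); /* 2 * (56 + 12 - 3 - 1) */
  assert(dc_gain_symodd(symodd_half, 4) == 128);   /* 64 + 2 * (35 + 0 - 3) */
  return 0;
}
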
diff --git a/av1/encoder/resize.h b/av1/encoder/resize.h
index 030f4e46c18afc6e72a1d53e36e46ef12b5e5cc1..a7b02c7b6c03aa7725593ad5550e2326f6c79816 100644
--- a/av1/encoder/resize.h
+++ b/av1/encoder/resize.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_RESIZE_H_
-#define VP10_ENCODER_RESIZE_H_
+#ifndef AV1_ENCODER_RESIZE_H_
+#define AV1_ENCODER_RESIZE_H_
 
 #include <stdio.h>
 #include "aom/aom_integer.h"
@@ -19,51 +19,51 @@
 extern "C" {
 #endif
 
-void vp10_resize_plane(const uint8_t *const input, int height, int width,
+void av1_resize_plane(const uint8_t *const input, int height, int width,
                        int in_stride, uint8_t *output, int height2, int width2,
                        int out_stride);
-void vp10_resize_frame420(const uint8_t *const y, int y_stride,
+void av1_resize_frame420(const uint8_t *const y, int y_stride,
                           const uint8_t *const u, const uint8_t *const v,
                           int uv_stride, int height, int width, uint8_t *oy,
                           int oy_stride, uint8_t *ou, uint8_t *ov,
                           int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_resize_frame422(const uint8_t *const y, int y_stride,
                           const uint8_t *const u, const uint8_t *const v,
                           int uv_stride, int height, int width, uint8_t *oy,
                           int oy_stride, uint8_t *ou, uint8_t *ov,
                           int ouv_stride, int oheight, int owidth);
-void vp10_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_resize_frame444(const uint8_t *const y, int y_stride,
                           const uint8_t *const u, const uint8_t *const v,
                           int uv_stride, int height, int width, uint8_t *oy,
                           int oy_stride, uint8_t *ou, uint8_t *ov,
                           int ouv_stride, int oheight, int owidth);
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_resize_plane(const uint8_t *const input, int height, int width,
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_resize_plane(const uint8_t *const input, int height, int width,
                               int in_stride, uint8_t *output, int height2,
                               int width2, int out_stride, int bd);
-void vp10_highbd_resize_frame420(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame420(const uint8_t *const y, int y_stride,
                                  const uint8_t *const u, const uint8_t *const v,
                                  int uv_stride, int height, int width,
                                  uint8_t *oy, int oy_stride, uint8_t *ou,
                                  uint8_t *ov, int ouv_stride, int oheight,
                                  int owidth, int bd);
-void vp10_highbd_resize_frame422(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame422(const uint8_t *const y, int y_stride,
                                  const uint8_t *const u, const uint8_t *const v,
                                  int uv_stride, int height, int width,
                                  uint8_t *oy, int oy_stride, uint8_t *ou,
                                  uint8_t *ov, int ouv_stride, int oheight,
                                  int owidth, int bd);
-void vp10_highbd_resize_frame444(const uint8_t *const y, int y_stride,
+void av1_highbd_resize_frame444(const uint8_t *const y, int y_stride,
                                  const uint8_t *const u, const uint8_t *const v,
                                  int uv_stride, int height, int width,
                                  uint8_t *oy, int oy_stride, uint8_t *ou,
                                  uint8_t *ov, int ouv_stride, int oheight,
                                  int owidth, int bd);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_RESIZE_H_
+#endif  // AV1_ENCODER_RESIZE_H_
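
A short usage sketch for the renamed av1_resize_frame420() declared above. Note the height-before-width argument order and that the 4:2:0 chroma planes are handled at half resolution in each dimension. The packed-plane layout (stride equal to width) and the halve_frame_420() wrapper are illustrative assumptions, not code from this tree.

#include <stdlib.h>
#include "av1/encoder/resize.h"

/* Downscale a packed 4:2:0 frame to half size in each dimension. */
static void halve_frame_420(const uint8_t *y, const uint8_t *u,
                            const uint8_t *v, int width, int height) {
  const int ow = width / 2, oh = height / 2;
  uint8_t *oy = malloc((size_t)ow * oh);
  uint8_t *ou = malloc((size_t)(ow / 2) * (oh / 2));
  uint8_t *ov = malloc((size_t)(ow / 2) * (oh / 2));
  if (oy && ou && ov) {
    av1_resize_frame420(y, /*y_stride=*/width, u, v, /*uv_stride=*/width / 2,
                        height, width, oy, /*oy_stride=*/ow, ou, ov,
                        /*ouv_stride=*/ow / 2, oh, ow);
    /* ... consume oy/ou/ov ... */
  }
  free(oy);
  free(ou);
  free(ov);
}
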
diff --git a/av1/encoder/segmentation.c b/av1/encoder/segmentation.c
index 0c0dccefd92f94519be26a39ee5167a5c95aaf8d..5388b943685d544578c03bf3d1c05646f53f40cd 100644
--- a/av1/encoder/segmentation.c
+++ b/av1/encoder/segmentation.c
@@ -20,30 +20,30 @@
 #include "av1/encoder/segmentation.h"
 #include "av1/encoder/subexp.h"
 
-void vp10_enable_segmentation(struct segmentation *seg) {
+void av1_enable_segmentation(struct segmentation *seg) {
   seg->enabled = 1;
   seg->update_map = 1;
   seg->update_data = 1;
 }
 
-void vp10_disable_segmentation(struct segmentation *seg) {
+void av1_disable_segmentation(struct segmentation *seg) {
   seg->enabled = 0;
   seg->update_map = 0;
   seg->update_data = 0;
 }
 
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
                            unsigned char abs_delta) {
   seg->abs_delta = abs_delta;
 
   memcpy(seg->feature_data, feature_data, sizeof(seg->feature_data));
 }
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
                              SEG_LVL_FEATURES feature_id) {
   seg->feature_mask[segment_id] &= ~(1 << feature_id);
 }
 
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
                         SEG_LVL_FEATURES feature_id) {
   seg->feature_data[segment_id][feature_id] = 0;
 }
@@ -74,7 +74,7 @@ static void calc_segtree_probs(unsigned *segcounts,
   for (i = 0; i < 7; i++) {
     const unsigned *ct =
         i == 0 ? ccc : i < 3 ? cc + (i & 2) : segcounts + (i - 3) * 2;
-    vp10_prob_diff_update_savings_search(
+    av1_prob_diff_update_savings_search(
         ct, cur_tree_probs[i], &segment_tree_probs[i], DIFF_UPDATE_PROB);
   }
 #else
@@ -92,35 +92,35 @@ static int cost_segmap(unsigned *segcounts, aom_prob *probs) {
   const int c4567 = c45 + c67;
 
   // Cost the top node of the tree
-  int cost = c0123 * vp10_cost_zero(probs[0]) + c4567 * vp10_cost_one(probs[0]);
+  int cost = c0123 * av1_cost_zero(probs[0]) + c4567 * av1_cost_one(probs[0]);
 
   // Cost subsequent levels
   if (c0123 > 0) {
-    cost += c01 * vp10_cost_zero(probs[1]) + c23 * vp10_cost_one(probs[1]);
+    cost += c01 * av1_cost_zero(probs[1]) + c23 * av1_cost_one(probs[1]);
 
     if (c01 > 0)
-      cost += segcounts[0] * vp10_cost_zero(probs[3]) +
-              segcounts[1] * vp10_cost_one(probs[3]);
+      cost += segcounts[0] * av1_cost_zero(probs[3]) +
+              segcounts[1] * av1_cost_one(probs[3]);
     if (c23 > 0)
-      cost += segcounts[2] * vp10_cost_zero(probs[4]) +
-              segcounts[3] * vp10_cost_one(probs[4]);
+      cost += segcounts[2] * av1_cost_zero(probs[4]) +
+              segcounts[3] * av1_cost_one(probs[4]);
   }
 
   if (c4567 > 0) {
-    cost += c45 * vp10_cost_zero(probs[2]) + c67 * vp10_cost_one(probs[2]);
+    cost += c45 * av1_cost_zero(probs[2]) + c67 * av1_cost_one(probs[2]);
 
     if (c45 > 0)
-      cost += segcounts[4] * vp10_cost_zero(probs[5]) +
-              segcounts[5] * vp10_cost_one(probs[5]);
+      cost += segcounts[4] * av1_cost_zero(probs[5]) +
+              segcounts[5] * av1_cost_one(probs[5]);
     if (c67 > 0)
-      cost += segcounts[6] * vp10_cost_zero(probs[6]) +
-              segcounts[7] * vp10_cost_one(probs[6]);
+      cost += segcounts[6] * av1_cost_zero(probs[6]) +
+              segcounts[7] * av1_cost_one(probs[6]);
   }
 
   return cost;
 }
 
-static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs(const AV1_COMMON *cm, MACROBLOCKD *xd,
                        const TileInfo *tile, MODE_INFO **mi,
                        unsigned *no_pred_segcounts,
                        unsigned (*temporal_predictor_count)[2],
@@ -145,7 +145,7 @@ static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
     const int pred_segment_id =
         get_segment_id(cm, cm->last_frame_seg_map, bsize, mi_row, mi_col);
     const int pred_flag = pred_segment_id == segment_id;
-    const int pred_context = vp10_get_pred_context_seg_id(xd);
+    const int pred_context = av1_get_pred_context_seg_id(xd);
 
     // Store the prediction status for this mb and update counts
     // as appropriate
@@ -157,7 +157,7 @@ static void count_segs(const VP10_COMMON *cm, MACROBLOCKD *xd,
   }
 }
 
-static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
+static void count_segs_sb(const AV1_COMMON *cm, MACROBLOCKD *xd,
                           const TileInfo *tile, MODE_INFO **mi,
                           unsigned *no_pred_segcounts,
                           unsigned (*temporal_predictor_count)[2],
@@ -204,7 +204,7 @@ static void count_segs_sb(const VP10_COMMON *cm, MACROBLOCKD *xd,
   }
 }
 
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd) {
   struct segmentation *seg = &cm->seg;
 #if CONFIG_MISC_FIXES
   struct segmentation_probs *segp = &cm->fc->seg;
@@ -245,7 +245,7 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
   for (tile_col = 0; tile_col < 1 << cm->log2_tile_cols; tile_col++) {
     TileInfo tile;
     MODE_INFO **mi_ptr;
-    vp10_tile_init(&tile, cm, 0, tile_col);
+    av1_tile_init(&tile, cm, 0, tile_col);
 
     mi_ptr = cm->mi_grid_visible + tile.mi_col_start;
     for (mi_row = 0; mi_row < cm->mi_rows;
@@ -277,7 +277,7 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
       const int count1 = temporal_predictor_count[i][1];
 
 #if CONFIG_MISC_FIXES
-      vp10_prob_diff_update_savings_search(temporal_predictor_count[i],
+      av1_prob_diff_update_savings_search(temporal_predictor_count[i],
                                            segp->pred_probs[i],
                                            &t_nopred_prob[i], DIFF_UPDATE_PROB);
 #else
@@ -285,8 +285,8 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
 #endif
 
       // Add in the predictor signaling cost
-      t_pred_cost += count0 * vp10_cost_zero(t_nopred_prob[i]) +
-                     count1 * vp10_cost_one(t_nopred_prob[i]);
+      t_pred_cost += count0 * av1_cost_zero(t_nopred_prob[i]) +
+                     count1 * av1_cost_one(t_nopred_prob[i]);
     }
   }
 
@@ -306,7 +306,7 @@ void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd) {
   }
 }
 
-void vp10_reset_segment_features(VP10_COMMON *cm) {
+void av1_reset_segment_features(AV1_COMMON *cm) {
   struct segmentation *seg = &cm->seg;
 #if !CONFIG_MISC_FIXES
   struct segmentation_probs *segp = &cm->segp;
@@ -319,5 +319,5 @@ void vp10_reset_segment_features(VP10_COMMON *cm) {
 #if !CONFIG_MISC_FIXES
   memset(segp->tree_probs, 255, sizeof(segp->tree_probs));
 #endif
-  vp10_clearall_segfeatures(seg);
+  av1_clearall_segfeatures(seg);
 }
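
The cost_segmap() change above walks the 8-leaf segment-ID tree: probs[0] splits segments {0..3} from {4..7}, probs[1]/probs[2] split those groups into pairs, and probs[3..6] pick a segment within each pair. The sketch below mirrors that structure but prices each node with -log2(p) instead of the fixed-point av1_cost_zero()/av1_cost_one() tables, so the result is only an approximation in plain bits.

#include <math.h>

/* Bits spent at one tree node, given n_zero/n_one traversals and the
 * probability p_zero (0 < p_zero < 1) of taking the zero branch. */
static double node_bits(double p_zero, unsigned n_zero, unsigned n_one) {
  double b = 0.0;
  if (n_zero) b += n_zero * -log2(p_zero);
  if (n_one) b += n_one * -log2(1.0 - p_zero);
  return b;
}

/* counts[s] = blocks assigned segment s; p[i] = P(node i takes branch 0). */
static double segmap_bits(const unsigned counts[8], const double p[7]) {
  const unsigned c01 = counts[0] + counts[1], c23 = counts[2] + counts[3];
  const unsigned c45 = counts[4] + counts[5], c67 = counts[6] + counts[7];
  double bits = node_bits(p[0], c01 + c23, c45 + c67); /* {0..3} vs {4..7} */
  bits += node_bits(p[1], c01, c23);                   /* {0,1} vs {2,3}   */
  bits += node_bits(p[2], c45, c67);                   /* {4,5} vs {6,7}   */
  bits += node_bits(p[3], counts[0], counts[1]);
  bits += node_bits(p[4], counts[2], counts[3]);
  bits += node_bits(p[5], counts[4], counts[5]);
  bits += node_bits(p[6], counts[6], counts[7]);
  return bits;
}
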
diff --git a/av1/encoder/segmentation.h b/av1/encoder/segmentation.h
index 620e5714d517db76a4054b4f8b0843e878a7bae0..418dac67abf4afa368c724c8c8ecc49c106abaa8 100644
--- a/av1/encoder/segmentation.h
+++ b/av1/encoder/segmentation.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_SEGMENTATION_H_
-#define VP10_ENCODER_SEGMENTATION_H_
+#ifndef AV1_ENCODER_SEGMENTATION_H_
+#define AV1_ENCODER_SEGMENTATION_H_
 
 #include "av1/common/blockd.h"
 #include "av1/encoder/encoder.h"
@@ -19,12 +19,12 @@
 extern "C" {
 #endif
 
-void vp10_enable_segmentation(struct segmentation *seg);
-void vp10_disable_segmentation(struct segmentation *seg);
+void av1_enable_segmentation(struct segmentation *seg);
+void av1_disable_segmentation(struct segmentation *seg);
 
-void vp10_disable_segfeature(struct segmentation *seg, int segment_id,
+void av1_disable_segfeature(struct segmentation *seg, int segment_id,
                              SEG_LVL_FEATURES feature_id);
-void vp10_clear_segdata(struct segmentation *seg, int segment_id,
+void av1_clear_segdata(struct segmentation *seg, int segment_id,
                         SEG_LVL_FEATURES feature_id);
 
 // The values given for each segment can be either deltas (from the default
@@ -37,15 +37,15 @@ void vp10_clear_segdata(struct segmentation *seg, int segment_id,
 //
 // abs_delta = SEGMENT_DELTADATA (deltas) abs_delta = SEGMENT_ABSDATA (use
 // the absolute values given).
-void vp10_set_segment_data(struct segmentation *seg, signed char *feature_data,
+void av1_set_segment_data(struct segmentation *seg, signed char *feature_data,
                            unsigned char abs_delta);
 
-void vp10_choose_segmap_coding_method(VP10_COMMON *cm, MACROBLOCKD *xd);
+void av1_choose_segmap_coding_method(AV1_COMMON *cm, MACROBLOCKD *xd);
 
-void vp10_reset_segment_features(VP10_COMMON *cm);
+void av1_reset_segment_features(AV1_COMMON *cm);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SEGMENTATION_H_
+#endif  // AV1_ENCODER_SEGMENTATION_H_
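
A hedged usage sketch for the renamed segmentation API declared above: enable segmentation, hand the encoder a per-segment feature table, and later drop one feature again. The SEG_LVL_ALT_Q feature id, the 8x4 (MAX_SEGMENTS x SEG_LVL_MAX) table shape, and the fact that the per-feature enable mask is managed elsewhere in the common segmentation code are assumptions about surrounding headers, not taken from this patch.

#include <string.h>
#include "av1/encoder/segmentation.h"

static void boost_segment_zero(struct segmentation *seg) {
  /* Assumed layout: one signed value per (segment, feature) pair. */
  signed char feature_data[8][4];
  memset(feature_data, 0, sizeof(feature_data));
  feature_data[0][SEG_LVL_ALT_Q] = -16; /* delta-Q for segment 0 */

  av1_enable_segmentation(seg);
  /* SEGMENT_DELTADATA: the values are deltas from the frame defaults. */
  av1_set_segment_data(seg, &feature_data[0][0], SEGMENT_DELTADATA);

  /* Later, stop using the delta-Q feature on segment 0. */
  av1_disable_segfeature(seg, 0, SEG_LVL_ALT_Q);
  av1_clear_segdata(seg, 0, SEG_LVL_ALT_Q);
}
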
diff --git a/av1/encoder/skin_detection.c b/av1/encoder/skin_detection.c
index b24bdae7a3351c59018f87577dedea5d2c216813..11ff16b24aadeb06735134a04992813e2d3050a2 100644
--- a/av1/encoder/skin_detection.c
+++ b/av1/encoder/skin_detection.c
@@ -41,7 +41,7 @@ static int evaluate_skin_color_difference(int cb, int cr) {
   return skin_diff;
 }
 
-int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
+int av1_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
   if (y < y_low || y > y_high)
     return 0;
   else
@@ -50,9 +50,9 @@ int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr) {
 
 #ifdef OUTPUT_YUV_SKINMAP
 // For viewing skin map on input source.
-void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file) {
+void av1_compute_skin_map(AV1_COMP *const cpi, FILE *yuv_skinmap_file) {
   int i, j, mi_row, mi_col;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   uint8_t *y;
   const uint8_t *src_y = cpi->Source->y_buffer;
   const uint8_t *src_u = cpi->Source->u_buffer;
@@ -79,7 +79,7 @@ void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file) {
       const uint8_t ysource = src_y[4 * src_ystride + 4];
       const uint8_t usource = src_u[2 * src_uvstride + 2];
       const uint8_t vsource = src_v[2 * src_uvstride + 2];
-      const int is_skin = vp10_skin_pixel(ysource, usource, vsource);
+      const int is_skin = av1_skin_pixel(ysource, usource, vsource);
       for (i = 0; i < 8; i++) {
         for (j = 0; j < 8; j++) {
           if (is_skin)
@@ -98,7 +98,7 @@ void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file) {
     src_u += (src_uvstride << 2) - ((cm->mi_cols - 1) << 2);
     src_v += (src_uvstride << 2) - ((cm->mi_cols - 1) << 2);
   }
-  vp10_write_yuv_frame_420(&skinmap, yuv_skinmap_file);
+  av1_write_yuv_frame_420(&skinmap, yuv_skinmap_file);
   aom_free_frame_buffer(&skinmap);
 }
 #endif
diff --git a/av1/encoder/skin_detection.h b/av1/encoder/skin_detection.h
index 49c25c1548ea846130705382bbc828352295afc9..407130306dcc0c412c6ece80dd373e13c331022b 100644
--- a/av1/encoder/skin_detection.h
+++ b/av1/encoder/skin_detection.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_SKIN_MAP_H_
-#define VP10_ENCODER_SKIN_MAP_H_
+#ifndef AV1_ENCODER_SKIN_MAP_H_
+#define AV1_ENCODER_SKIN_MAP_H_
 
 #include "av1/common/blockd.h"
 
@@ -18,19 +18,19 @@
 extern "C" {
 #endif
 
-struct VP10_COMP;
+struct AV1_COMP;
 
 // #define OUTPUT_YUV_SKINMAP
 
-int vp10_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr);
+int av1_skin_pixel(const uint8_t y, const uint8_t cb, const uint8_t cr);
 
 #ifdef OUTPUT_YUV_SKINMAP
 // For viewing skin map on input source.
-void vp10_compute_skin_map(VP10_COMP *const cpi, FILE *yuv_skinmap_file);
+void av1_compute_skin_map(AV1_COMP *const cpi, FILE *yuv_skinmap_file);
 #endif
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SKIN_MAP_H_
+#endif  // AV1_ENCODER_SKIN_MAP_H_
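
The av1_compute_skin_map() hunk above classifies each 8x8 luma block by probing a single sample near its centre: luma offset (4,4) and, for 4:2:0, chroma offset (2,2). The helper below applies the same sampling through the public av1_skin_pixel() declared in this header; the map layout and the build_skin_map_8x8() name are illustrative.

#include <stdint.h>
#include "av1/encoder/skin_detection.h"

/* map must hold (width / 8) * (height / 8) entries; 1 = skin, 0 = not. */
static void build_skin_map_8x8(const uint8_t *y, int y_stride,
                               const uint8_t *u, const uint8_t *v,
                               int uv_stride, int width, int height,
                               uint8_t *map) {
  int bx, by;
  for (by = 0; by < height / 8; ++by) {
    for (bx = 0; bx < width / 8; ++bx) {
      const uint8_t ys = y[(8 * by + 4) * y_stride + 8 * bx + 4];
      const uint8_t us = u[(4 * by + 2) * uv_stride + 4 * bx + 2];
      const uint8_t vs = v[(4 * by + 2) * uv_stride + 4 * bx + 2];
      map[by * (width / 8) + bx] = (uint8_t)av1_skin_pixel(ys, us, vs);
    }
  }
}
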
diff --git a/av1/encoder/speed_features.c b/av1/encoder/speed_features.c
index 72e2a95f7769412f9d4a17cabca09737b8bb8b9f..5ef444d34e601024aa9c17985a13e5e7817b5dec 100644
--- a/av1/encoder/speed_features.c
+++ b/av1/encoder/speed_features.c
@@ -38,7 +38,7 @@ static unsigned char good_quality_max_mesh_pct[MAX_MESH_SPEED + 1] = {
 
 // Intra only frames, golden frames (except alt ref overlays) and
 // alt ref frames tend to be coded at a higher than ambient quality
-static int frame_is_boosted(const VP10_COMP *cpi) {
+static int frame_is_boosted(const AV1_COMP *cpi) {
   return frame_is_kf_gf_arf(cpi);
 }
 
@@ -48,7 +48,7 @@ static int frame_is_boosted(const VP10_COMP *cpi) {
 // partly on the screen area over which they propagate. Propagation is
 // limited by transform block size but the screen area taken up by a given block
 // size will be larger for a small image format stretched to full screen.
-static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) {
+static BLOCK_SIZE set_partition_min_limit(AV1_COMMON *const cm) {
   unsigned int screen_area = (cm->width * cm->height);
 
   // Select block size based on image format size.
@@ -64,10 +64,10 @@ static BLOCK_SIZE set_partition_min_limit(VP10_COMMON *const cm) {
   }
 }
 
-static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_good_speed_feature_framesize_dependent(AV1_COMP *cpi,
                                                        SPEED_FEATURES *sf,
                                                        int speed) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   if (speed >= 1) {
     if (VPXMIN(cm->width, cm->height) >= 720) {
@@ -115,7 +115,7 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
   // Also if the image edge is internal to the coded area.
   if ((speed >= 1) && (cpi->oxcf.pass == 2) &&
       ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
-       (vp10_internal_image_edge(cpi)))) {
+       (av1_internal_image_edge(cpi)))) {
     sf->disable_split_mask = DISABLE_COMPOUND_SPLIT;
   }
 
@@ -129,7 +129,7 @@ static void set_good_speed_feature_framesize_dependent(VP10_COMP *cpi,
   }
 }
 
-static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
+static void set_good_speed_feature(AV1_COMP *cpi, AV1_COMMON *cm,
                                    SPEED_FEATURES *sf, int speed) {
   const int boosted = frame_is_boosted(cpi);
 
@@ -138,7 +138,7 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
 
   if (speed >= 1) {
     if ((cpi->twopass.fr_content_type == FC_GRAPHICS_ANIMATION) ||
-        vp10_internal_image_edge(cpi)) {
+        av1_internal_image_edge(cpi)) {
       sf->use_square_partition_only = !frame_is_boosted(cpi);
     } else {
       sf->use_square_partition_only = !frame_is_intra_only(cm);
@@ -227,10 +227,10 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
   }
 }
 
-static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
+static void set_rt_speed_feature_framesize_dependent(AV1_COMP *cpi,
                                                      SPEED_FEATURES *sf,
                                                      int speed) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
 
   if (speed >= 1) {
     if (VPXMIN(cm->width, cm->height) >= 720) {
@@ -264,9 +264,9 @@ static void set_rt_speed_feature_framesize_dependent(VP10_COMP *cpi,
   }
 }
 
-static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
+static void set_rt_speed_feature(AV1_COMP *cpi, SPEED_FEATURES *sf, int speed,
                                  aom_tune_content content) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   const int is_keyframe = cm->frame_type == KEY_FRAME;
   const int frames_since_key = is_keyframe ? 0 : cpi->rc.frames_since_key;
   sf->static_segmentation = 0;
@@ -411,9 +411,9 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf, int speed,
   }
 }
 
-void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_dependent(AV1_COMP *cpi) {
   SPEED_FEATURES *const sf = &cpi->sf;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   RD_OPT *const rd = &cpi->rd;
   int i;
 
@@ -440,11 +440,11 @@ void vp10_set_speed_features_framesize_dependent(VP10_COMP *cpi) {
   }
 }
 
-void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
+void av1_set_speed_features_framesize_independent(AV1_COMP *cpi) {
   SPEED_FEATURES *const sf = &cpi->sf;
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &cpi->td.mb;
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   int i;
 
   // best quality defaults
@@ -520,8 +520,8 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
   else if (oxcf->mode == GOOD)
     set_good_speed_feature(cpi, cm, sf, oxcf->speed);
 
-  cpi->full_search_sad = vp10_full_search_sad;
-  cpi->diamond_search_sad = vp10_diamond_search_sad;
+  cpi->full_search_sad = av1_full_search_sad;
+  cpi->diamond_search_sad = av1_diamond_search_sad;
 
   sf->allow_exhaustive_searches = 1;
   if (oxcf->mode == BEST) {
@@ -562,14 +562,14 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
   }
 
   if (sf->mv.subpel_search_method == SUBPEL_TREE) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_MORE) {
-    cpi->find_fractional_mv_step = vp10_find_best_sub_pixel_tree_pruned_more;
+    cpi->find_fractional_mv_step = av1_find_best_sub_pixel_tree_pruned_more;
   } else if (sf->mv.subpel_search_method == SUBPEL_TREE_PRUNED_EVENMORE) {
     cpi->find_fractional_mv_step =
-        vp10_find_best_sub_pixel_tree_pruned_evenmore;
+        av1_find_best_sub_pixel_tree_pruned_evenmore;
   }
 
 #if !CONFIG_AOM_QM
diff --git a/av1/encoder/speed_features.h b/av1/encoder/speed_features.h
index ff0212714f5759bfd7399f585502a298005ed201..349537a18db136470de7c624ab751e69d11856c9 100644
--- a/av1/encoder/speed_features.h
+++ b/av1/encoder/speed_features.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_SPEED_FEATURES_H_
-#define VP10_ENCODER_SPEED_FEATURES_H_
+#ifndef AV1_ENCODER_SPEED_FEATURES_H_
+#define AV1_ENCODER_SPEED_FEATURES_H_
 
 #include "av1/common/enums.h"
 
@@ -415,17 +415,17 @@ typedef struct SPEED_FEATURES {
   // Allow skipping partition search for still image frame
   int allow_partition_search_skip;
 
-  // Fast approximation of vp10_model_rd_from_var_lapndz
+  // Fast approximation of av1_model_rd_from_var_lapndz
   int simple_model_rd_from_var;
 } SPEED_FEATURES;
 
-struct VP10_COMP;
+struct AV1_COMP;
 
-void vp10_set_speed_features_framesize_independent(struct VP10_COMP *cpi);
-void vp10_set_speed_features_framesize_dependent(struct VP10_COMP *cpi);
+void av1_set_speed_features_framesize_independent(struct AV1_COMP *cpi);
+void av1_set_speed_features_framesize_dependent(struct AV1_COMP *cpi);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SPEED_FEATURES_H_
+#endif  // AV1_ENCODER_SPEED_FEATURES_H_
diff --git a/av1/encoder/subexp.c b/av1/encoder/subexp.c
index d1930a21d06353a7ad32ede670aff2936f7c5e52..a89ebcaff5b7119fdfa6193100979b083288f9bd 100644
--- a/av1/encoder/subexp.c
+++ b/av1/encoder/subexp.c
@@ -15,7 +15,7 @@
 #include "av1/encoder/cost.h"
 #include "av1/encoder/subexp.h"
 
-#define vp10_cost_upd256 ((int)(vp10_cost_one(upd) - vp10_cost_zero(upd)))
+#define av1_cost_upd256 ((int)(av1_cost_one(upd) - av1_cost_zero(upd)))
 
 static const uint8_t update_bits[255] = {
   5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,  5,
@@ -116,12 +116,12 @@ static void encode_term_subexp(aom_writer *w, int word) {
   }
 }
 
-void vp10_write_prob_diff_update(aom_writer *w, aom_prob newp, aom_prob oldp) {
+void av1_write_prob_diff_update(aom_writer *w, aom_prob newp, aom_prob oldp) {
   const int delp = remap_prob(newp, oldp);
   encode_term_subexp(w, delp);
 }
 
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
                                          aom_prob *bestp, aom_prob upd) {
   const int old_b = cost_branch256(ct, oldp);
   int bestsavings = 0;
@@ -130,7 +130,7 @@ int vp10_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
 
   for (newp = *bestp; newp != oldp; newp += step) {
     const int new_b = cost_branch256(ct, newp);
-    const int update_b = prob_diff_update_cost(newp, oldp) + vp10_cost_upd256;
+    const int update_b = prob_diff_update_cost(newp, oldp) + av1_cost_upd256;
     const int savings = old_b - new_b - update_b;
     if (savings > bestsavings) {
       bestsavings = savings;
@@ -141,14 +141,14 @@ int vp10_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
   return bestsavings;
 }
 
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
                                                const aom_prob *oldp,
                                                aom_prob *bestp, aom_prob upd,
                                                int stepsize) {
   int i, old_b, new_b, update_b, savings, bestsavings, step;
   int newp;
   aom_prob bestnewp, newplist[ENTROPY_NODES], oldplist[ENTROPY_NODES];
-  vp10_model_to_full_probs(oldp, oldplist);
+  av1_model_to_full_probs(oldp, oldplist);
   memcpy(newplist, oldp, sizeof(aom_prob) * UNCONSTRAINED_NODES);
   for (i = UNCONSTRAINED_NODES, old_b = 0; i < ENTROPY_NODES; ++i)
     old_b += cost_branch256(ct + 2 * i, oldplist[i]);
@@ -162,12 +162,12 @@ int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
     for (newp = *bestp; newp > oldp[PIVOT_NODE]; newp += step) {
       if (newp < 1 || newp > 255) continue;
       newplist[PIVOT_NODE] = newp;
-      vp10_model_to_full_probs(newplist, newplist);
+      av1_model_to_full_probs(newplist, newplist);
       for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
         new_b += cost_branch256(ct + 2 * i, newplist[i]);
       new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
       update_b =
-          prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+          prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
       savings = old_b - new_b - update_b;
       if (savings > bestsavings) {
         bestsavings = savings;
@@ -179,12 +179,12 @@ int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
     for (newp = *bestp; newp < oldp[PIVOT_NODE]; newp += step) {
       if (newp < 1 || newp > 255) continue;
       newplist[PIVOT_NODE] = newp;
-      vp10_model_to_full_probs(newplist, newplist);
+      av1_model_to_full_probs(newplist, newplist);
       for (i = UNCONSTRAINED_NODES, new_b = 0; i < ENTROPY_NODES; ++i)
         new_b += cost_branch256(ct + 2 * i, newplist[i]);
       new_b += cost_branch256(ct + 2 * PIVOT_NODE, newplist[PIVOT_NODE]);
       update_b =
-          prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + vp10_cost_upd256;
+          prob_diff_update_cost(newp, oldp[PIVOT_NODE]) + av1_cost_upd256;
       savings = old_b - new_b - update_b;
       if (savings > bestsavings) {
         bestsavings = savings;
@@ -197,27 +197,27 @@ int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
   return bestsavings;
 }
 
-void vp10_cond_prob_diff_update(aom_writer *w, aom_prob *oldp,
+void av1_cond_prob_diff_update(aom_writer *w, aom_prob *oldp,
                                 const unsigned int ct[2]) {
   const aom_prob upd = DIFF_UPDATE_PROB;
   aom_prob newp = get_binary_prob(ct[0], ct[1]);
   const int savings =
-      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+      av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
   assert(newp >= 1);
   if (savings > 0) {
     aom_write(w, 1, upd);
-    vp10_write_prob_diff_update(w, newp, *oldp);
+    av1_write_prob_diff_update(w, newp, *oldp);
     *oldp = newp;
   } else {
     aom_write(w, 0, upd);
   }
 }
 
-int vp10_cond_prob_diff_update_savings(aom_prob *oldp,
+int av1_cond_prob_diff_update_savings(aom_prob *oldp,
                                        const unsigned int ct[2]) {
   const aom_prob upd = DIFF_UPDATE_PROB;
   aom_prob newp = get_binary_prob(ct[0], ct[1]);
   const int savings =
-      vp10_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
+      av1_prob_diff_update_savings_search(ct, *oldp, &newp, upd);
   return savings;
 }
diff --git a/av1/encoder/subexp.h b/av1/encoder/subexp.h
index 9d870f56b2b6d11126c64ff9bb30f80ee3d93818..d8142bafd4a50ef911edd66645c3a59d8517a1fe 100644
--- a/av1/encoder/subexp.h
+++ b/av1/encoder/subexp.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_SUBEXP_H_
-#define VP10_ENCODER_SUBEXP_H_
+#ifndef AV1_ENCODER_SUBEXP_H_
+#define AV1_ENCODER_SUBEXP_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -20,24 +20,24 @@ extern "C" {
 
 struct aom_writer;
 
-void vp10_write_prob_diff_update(struct aom_writer *w, aom_prob newp,
+void av1_write_prob_diff_update(struct aom_writer *w, aom_prob newp,
                                  aom_prob oldp);
 
-void vp10_cond_prob_diff_update(struct aom_writer *w, aom_prob *oldp,
+void av1_cond_prob_diff_update(struct aom_writer *w, aom_prob *oldp,
                                 const unsigned int ct[2]);
 
-int vp10_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
+int av1_prob_diff_update_savings_search(const unsigned int *ct, aom_prob oldp,
                                          aom_prob *bestp, aom_prob upd);
 
-int vp10_prob_diff_update_savings_search_model(const unsigned int *ct,
+int av1_prob_diff_update_savings_search_model(const unsigned int *ct,
                                                const aom_prob *oldp,
                                                aom_prob *bestp, aom_prob upd,
                                                int stepsize);
 
-int vp10_cond_prob_diff_update_savings(aom_prob *oldp,
+int av1_cond_prob_diff_update_savings(aom_prob *oldp,
                                        const unsigned int ct[2]);
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_SUBEXP_H_
+#endif  // AV1_ENCODER_SUBEXP_H_
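
As an aside, a minimal sketch of how the renamed subexp entry points above are driven from encoder code — illustrative only, not part of the patch; the wrapper name and the origin of the counts array are assumptions:

```c
#include "av1/encoder/subexp.h"

/* Illustrative sketch: conditionally refresh one binary probability from
 * observed branch counts. av1_cond_prob_diff_update() writes an update flag
 * and, only when the measured rate savings are positive, a subexp-coded
 * delta, leaving *prob at the value the decoder will reconstruct. */
static void refresh_prob(struct aom_writer *w, aom_prob *prob,
                         const unsigned int ct[2]) {
  av1_cond_prob_diff_update(w, prob, ct);
}
```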
diff --git a/av1/encoder/temporal_filter.c b/av1/encoder/temporal_filter.c
index cdad633ec6f992191328765e07464355116cc41c..31dda5b138946e220189166925d07ab08153c399 100644
--- a/av1/encoder/temporal_filter.c
+++ b/av1/encoder/temporal_filter.c
@@ -38,7 +38,7 @@ static void temporal_filter_predictors_mb_c(
   const int which_mv = 0;
   const MV mv = { mv_row, mv_col };
   const InterpKernel *const kernel =
-      vp10_filter_kernels[xd->mi[0]->mbmi.interp_filter];
+      av1_filter_kernels[xd->mi[0]->mbmi.interp_filter];
 
   enum mv_precision mv_precision_uv;
   int uv_stride;
@@ -50,37 +50,37 @@ static void temporal_filter_predictors_mb_c(
     mv_precision_uv = MV_PRECISION_Q3;
   }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-    vp10_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
+    av1_highbd_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv,
                                       scale, 16, 16, which_mv, kernel,
                                       MV_PRECISION_Q3, x, y, xd->bd);
 
-    vp10_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
+    av1_highbd_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256],
                                       uv_block_width, &mv, scale,
                                       uv_block_width, uv_block_height, which_mv,
                                       kernel, mv_precision_uv, x, y, xd->bd);
 
-    vp10_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
+    av1_highbd_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512],
                                       uv_block_width, &mv, scale,
                                       uv_block_width, uv_block_height, which_mv,
                                       kernel, mv_precision_uv, x, y, xd->bd);
     return;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
-  vp10_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
+#endif  // CONFIG_AOM_HIGHBITDEPTH
+  av1_build_inter_predictor(y_mb_ptr, stride, &pred[0], 16, &mv, scale, 16, 16,
                              which_mv, kernel, MV_PRECISION_Q3, x, y);
 
-  vp10_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
+  av1_build_inter_predictor(u_mb_ptr, uv_stride, &pred[256], uv_block_width,
                              &mv, scale, uv_block_width, uv_block_height,
                              which_mv, kernel, mv_precision_uv, x, y);
 
-  vp10_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
+  av1_build_inter_predictor(v_mb_ptr, uv_stride, &pred[512], uv_block_width,
                              &mv, scale, uv_block_width, uv_block_height,
                              which_mv, kernel, mv_precision_uv, x, y);
 }
 
-void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
+void av1_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
                                   uint8_t *frame2, unsigned int block_width,
                                   unsigned int block_height, int strength,
                                   int filter_weight, unsigned int *accumulator,
@@ -119,8 +119,8 @@ void vp10_temporal_filter_apply_c(uint8_t *frame1, unsigned int stride,
   }
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-void vp10_highbd_temporal_filter_apply_c(
+#if CONFIG_AOM_HIGHBITDEPTH
+void av1_highbd_temporal_filter_apply_c(
     uint8_t *frame1_8, unsigned int stride, uint8_t *frame2_8,
     unsigned int block_width, unsigned int block_height, int strength,
     int filter_weight, unsigned int *accumulator, uint16_t *count) {
@@ -159,9 +159,9 @@ void vp10_highbd_temporal_filter_apply_c(
     byte += stride - block_width;
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
+static int temporal_filter_find_matching_mb_c(AV1_COMP *cpi,
                                               uint8_t *arf_frame_buf,
                                               uint8_t *frame_ptr_buf,
                                               int stride) {
@@ -196,7 +196,7 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
   step_param = VPXMIN(step_param, MAX_MVSEARCH_STEPS - 2);
 
   // Ignore mv costing by sending NULL pointer instead of cost arrays
-  vp10_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
+  av1_hex_search(x, &best_ref_mv1_full, step_param, sadpb, 1,
                   cond_cost_list(cpi, cost_list), &cpi->fn_ptr[BLOCK_16X16], 0,
                   &best_ref_mv1, ref_mv);
 
@@ -214,7 +214,7 @@ static int temporal_filter_find_matching_mb_c(VP10_COMP *cpi,
   return bestsme;
 }
 
-static void temporal_filter_iterate_c(VP10_COMP *cpi,
+static void temporal_filter_iterate_c(AV1_COMP *cpi,
                                       YV12_BUFFER_CONFIG **frames,
                                       int frame_count, int alt_ref_index,
                                       int strength,
@@ -232,7 +232,7 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
   MACROBLOCKD *mbd = &cpi->td.mb.e_mbd;
   YV12_BUFFER_CONFIG *f = frames[alt_ref_index];
   uint8_t *dst1, *dst2;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, predictor16[16 * 16 * 3]);
   DECLARE_ALIGNED(16, uint8_t, predictor8[16 * 16 * 3]);
   uint8_t *predictor;
@@ -245,7 +245,7 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
   // Save input state
   uint8_t *input_buffer[MAX_MB_PLANE];
   int i;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
     predictor = CONVERT_TO_BYTEPTR(predictor16);
   } else {
@@ -315,53 +315,53 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
               mbd->mi[0]->bmi[0].as_mv[0].as_mv.col, predictor, scale,
               mb_col * 16, mb_row * 16);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
             int adj_strength = strength + 2 * (mbd->bd - 8);
             // Apply the filter (YUV)
-            vp10_highbd_temporal_filter_apply(
+            av1_highbd_temporal_filter_apply(
                 f->y_buffer + mb_y_offset, f->y_stride, predictor, 16, 16,
                 adj_strength, filter_weight, accumulator, count);
-            vp10_highbd_temporal_filter_apply(
+            av1_highbd_temporal_filter_apply(
                 f->u_buffer + mb_uv_offset, f->uv_stride, predictor + 256,
                 mb_uv_width, mb_uv_height, adj_strength, filter_weight,
                 accumulator + 256, count + 256);
-            vp10_highbd_temporal_filter_apply(
+            av1_highbd_temporal_filter_apply(
                 f->v_buffer + mb_uv_offset, f->uv_stride, predictor + 512,
                 mb_uv_width, mb_uv_height, adj_strength, filter_weight,
                 accumulator + 512, count + 512);
           } else {
             // Apply the filter (YUV)
-            vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+            av1_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
                                        predictor, 16, 16, strength,
                                        filter_weight, accumulator, count);
-            vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+            av1_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
                                        predictor + 256, mb_uv_width,
                                        mb_uv_height, strength, filter_weight,
                                        accumulator + 256, count + 256);
-            vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+            av1_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
                                        predictor + 512, mb_uv_width,
                                        mb_uv_height, strength, filter_weight,
                                        accumulator + 512, count + 512);
           }
 #else
           // Apply the filter (YUV)
-          vp10_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
+          av1_temporal_filter_apply(f->y_buffer + mb_y_offset, f->y_stride,
                                      predictor, 16, 16, strength, filter_weight,
                                      accumulator, count);
-          vp10_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
+          av1_temporal_filter_apply(f->u_buffer + mb_uv_offset, f->uv_stride,
                                      predictor + 256, mb_uv_width, mb_uv_height,
                                      strength, filter_weight, accumulator + 256,
                                      count + 256);
-          vp10_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
+          av1_temporal_filter_apply(f->v_buffer + mb_uv_offset, f->uv_stride,
                                      predictor + 512, mb_uv_width, mb_uv_height,
                                      strength, filter_weight, accumulator + 512,
                                      count + 512);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (mbd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
         uint16_t *dst1_16;
         uint16_t *dst2_16;
@@ -481,7 +481,7 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
         }
         byte += stride - mb_uv_width;
       }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       mb_y_offset += 16;
       mb_uv_offset += mb_uv_width;
     }
@@ -494,11 +494,11 @@ static void temporal_filter_iterate_c(VP10_COMP *cpi,
 }
 
 // Apply buffer limits and context specific adjustments to arnr filter.
-static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
+static void adjust_arnr_filter(AV1_COMP *cpi, int distance, int group_boost,
                                int *arnr_frames, int *arnr_strength) {
-  const VP10EncoderConfig *const oxcf = &cpi->oxcf;
+  const AV1EncoderConfig *const oxcf = &cpi->oxcf;
   const int frames_after_arf =
-      vp10_lookahead_depth(cpi->lookahead) - distance - 1;
+      av1_lookahead_depth(cpi->lookahead) - distance - 1;
   int frames_fwd = (cpi->oxcf.arnr_max_frames - 1) >> 1;
   int frames_bwd;
   int q, frames, strength;
@@ -518,10 +518,10 @@ static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
 
   // Adjust the strength based on active max q.
   if (cpi->common.current_video_frame > 1)
-    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
+    q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[INTER_FRAME],
                                        cpi->common.bit_depth));
   else
-    q = ((int)vp10_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
+    q = ((int)av1_convert_qindex_to_q(cpi->rc.avg_frame_qindex[KEY_FRAME],
                                        cpi->common.bit_depth));
   if (q > 16) {
     strength = oxcf->arnr_strength;
@@ -552,7 +552,7 @@ static void adjust_arnr_filter(VP10_COMP *cpi, int distance, int group_boost,
   *arnr_strength = strength;
 }
 
-void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
+void av1_temporal_filter(AV1_COMP *cpi, int distance) {
   RATE_CONTROL *const rc = &cpi->rc;
   int frame;
   int frames_to_blur;
@@ -573,7 +573,7 @@ void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
   for (frame = 0; frame < frames_to_blur; ++frame) {
     const int which_buffer = start_frame - frame;
     struct lookahead_entry *buf =
-        vp10_lookahead_peek(cpi->lookahead, which_buffer);
+        av1_lookahead_peek(cpi->lookahead, which_buffer);
     frames[frames_to_blur - 1 - frame] = &buf->img;
   }
 
@@ -581,16 +581,16 @@ void vp10_temporal_filter(VP10_COMP *cpi, int distance) {
 // Setup scaling factors. Scaling on each of the arnr frames is not
 // supported.
 // ARF is produced at the native frame size and resized when coded.
-#if CONFIG_VPX_HIGHBITDEPTH
-    vp10_setup_scale_factors_for_frame(
+#if CONFIG_AOM_HIGHBITDEPTH
+    av1_setup_scale_factors_for_frame(
         &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
         frames[0]->y_crop_width, frames[0]->y_crop_height,
         cpi->common.use_highbitdepth);
 #else
-    vp10_setup_scale_factors_for_frame(
+    av1_setup_scale_factors_for_frame(
         &sf, frames[0]->y_crop_width, frames[0]->y_crop_height,
         frames[0]->y_crop_width, frames[0]->y_crop_height);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   temporal_filter_iterate_c(cpi, frames, frames_to_blur,
diff --git a/av1/encoder/temporal_filter.h b/av1/encoder/temporal_filter.h
index 6b68cb770d3389320d4dfcaefd95707395a8738e..bc0863a6381a45cee79f5780d6ef559dbd00d368 100644
--- a/av1/encoder/temporal_filter.h
+++ b/av1/encoder/temporal_filter.h
@@ -9,17 +9,17 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_TEMPORAL_FILTER_H_
-#define VP10_ENCODER_TEMPORAL_FILTER_H_
+#ifndef AV1_ENCODER_TEMPORAL_FILTER_H_
+#define AV1_ENCODER_TEMPORAL_FILTER_H_
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-void vp10_temporal_filter(VP10_COMP *cpi, int distance);
+void av1_temporal_filter(AV1_COMP *cpi, int distance);
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_TEMPORAL_FILTER_H_
+#endif  // AV1_ENCODER_TEMPORAL_FILTER_H_
diff --git a/av1/encoder/tokenize.c b/av1/encoder/tokenize.c
index 21fd17c767634b791e2ea71b0d0e0d38382ce365..0c386813b3d327cadaa0371d5b69dcf7b2b12150 100644
--- a/av1/encoder/tokenize.c
+++ b/av1/encoder/tokenize.c
@@ -46,13 +46,13 @@ static const TOKENVALUE dct_cat_lt_10_value_tokens[] = {
   { 9, 36 }, { 9, 38 }, { 9, 40 }, { 9, 42 }, { 9, 44 }, { 9, 46 }, { 9, 48 },
   { 9, 50 }, { 9, 52 }, { 9, 54 }, { 9, 56 }, { 9, 58 }, { 9, 60 }, { 9, 62 }
 };
-const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens =
+const TOKENVALUE *av1_dct_cat_lt_10_value_tokens =
     dct_cat_lt_10_value_tokens +
     (sizeof(dct_cat_lt_10_value_tokens) / sizeof(*dct_cat_lt_10_value_tokens)) /
         2;
 
 // Array indices are identical to previously-existing CONTEXT_NODE indices
-const aom_tree_index vp10_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
+const aom_tree_index av1_coef_tree[TREE_SIZE(ENTROPY_TOKENS)] = {
   -EOB_TOKEN,
   2,  // 0  = EOB
   -ZERO_TOKEN,
@@ -100,7 +100,7 @@ static const int16_t cat5_cost[1 << 5] = {
   2986, 3044, 3067, 3113, 3136, 3190, 3213, 3259, 3282, 3340, 3363,
   3409, 3432, 3531, 3554, 3600, 3623, 3681, 3704, 3750, 3773
 };
-const int16_t vp10_cat6_low_cost[256] = {
+const int16_t av1_cat6_low_cost[256] = {
   3378, 3390, 3401, 3413, 3435, 3447, 3458, 3470, 3517, 3529, 3540, 3552, 3574,
   3586, 3597, 3609, 3671, 3683, 3694, 3706, 3728, 3740, 3751, 3763, 3810, 3822,
   3833, 3845, 3867, 3879, 3890, 3902, 3973, 3985, 3996, 4008, 4030, 4042, 4053,
@@ -122,7 +122,7 @@ const int16_t vp10_cat6_low_cost[256] = {
   6620, 6632, 6654, 6666, 6677, 6689, 6751, 6763, 6774, 6786, 6808, 6820, 6831,
   6843, 6890, 6902, 6913, 6925, 6947, 6959, 6970, 6982
 };
-const int vp10_cat6_high_cost[64] = {
+const int av1_cat6_high_cost[64] = {
   88,    2251,  2727,  4890,  3148,  5311,  5787,  7950,  3666,  5829,  6305,
   8468,  6726,  8889,  9365,  11528, 3666,  5829,  6305,  8468,  6726,  8889,
   9365,  11528, 7244,  9407,  9883,  12046, 10304, 12467, 12943, 15106, 3666,
@@ -131,8 +131,8 @@ const int vp10_cat6_high_cost[64] = {
   15106, 10822, 12985, 13461, 15624, 13882, 16045, 16521, 18684
 };
 
-#if CONFIG_VPX_HIGHBITDEPTH
-const int vp10_cat6_high10_high_cost[256] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const int av1_cat6_high10_high_cost[256] = {
   94,    2257,  2733,  4896,  3154,  5317,  5793,  7956,  3672,  5835,  6311,
   8474,  6732,  8895,  9371,  11534, 3672,  5835,  6311,  8474,  6732,  8895,
   9371,  11534, 7250,  9413,  9889,  12052, 10310, 12473, 12949, 15112, 3672,
@@ -158,7 +158,7 @@ const int vp10_cat6_high10_high_cost[256] = {
   18075, 20238, 18496, 20659, 21135, 23298, 19014, 21177, 21653, 23816, 22074,
   24237, 24713, 26876
 };
-const int vp10_cat6_high12_high_cost[1024] = {
+const int av1_cat6_high12_high_cost[1024] = {
   100,   2263,  2739,  4902,  3160,  5323,  5799,  7962,  3678,  5841,  6317,
   8480,  6738,  8901,  9377,  11540, 3678,  5841,  6317,  8480,  6738,  8901,
   9377,  11540, 7256,  9419,  9895,  12058, 10316, 12479, 12955, 15118, 3678,
@@ -256,7 +256,7 @@ const int vp10_cat6_high12_high_cost[1024] = {
 };
 #endif
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static const aom_tree_index cat1_high10[2] = { 0, 0 };
 static const aom_tree_index cat2_high10[4] = { 2, 2, 0, 0 };
 static const aom_tree_index cat3_high10[6] = { 2, 2, 4, 4, 0, 0 };
@@ -277,59 +277,59 @@ static const aom_tree_index cat6_high12[36] = {
 };
 #endif
 
-const vp10_extra_bit vp10_extra_bits[ENTROPY_TOKENS] = {
+const av1_extra_bit av1_extra_bits[ENTROPY_TOKENS] = {
   { 0, 0, 0, 0, zero_cost },                             // ZERO_TOKEN
   { 0, 0, 0, 1, sign_cost },                             // ONE_TOKEN
   { 0, 0, 0, 2, sign_cost },                             // TWO_TOKEN
   { 0, 0, 0, 3, sign_cost },                             // THREE_TOKEN
   { 0, 0, 0, 4, sign_cost },                             // FOUR_TOKEN
-  { cat1, vp10_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost },  // CATEGORY1_TOKEN
-  { cat2, vp10_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost },  // CATEGORY2_TOKEN
-  { cat3, vp10_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost },  // CATEGORY3_TOKEN
-  { cat4, vp10_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost },  // CATEGORY4_TOKEN
-  { cat5, vp10_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost },  // CATEGORY5_TOKEN
-  { cat6, vp10_cat6_prob, 14, CAT6_MIN_VAL, 0 },         // CATEGORY6_TOKEN
+  { cat1, av1_cat1_prob, 1, CAT1_MIN_VAL, cat1_cost },   // CATEGORY1_TOKEN
+  { cat2, av1_cat2_prob, 2, CAT2_MIN_VAL, cat2_cost },   // CATEGORY2_TOKEN
+  { cat3, av1_cat3_prob, 3, CAT3_MIN_VAL, cat3_cost },   // CATEGORY3_TOKEN
+  { cat4, av1_cat4_prob, 4, CAT4_MIN_VAL, cat4_cost },   // CATEGORY4_TOKEN
+  { cat5, av1_cat5_prob, 5, CAT5_MIN_VAL, cat5_cost },   // CATEGORY5_TOKEN
+  { cat6, av1_cat6_prob, 14, CAT6_MIN_VAL, 0 },          // CATEGORY6_TOKEN
   { 0, 0, 0, 0, zero_cost }                              // EOB_TOKEN
 };
 
-#if CONFIG_VPX_HIGHBITDEPTH
-const vp10_extra_bit vp10_extra_bits_high10[ENTROPY_TOKENS] = {
+#if CONFIG_AOM_HIGHBITDEPTH
+const av1_extra_bit av1_extra_bits_high10[ENTROPY_TOKENS] = {
   { 0, 0, 0, 0, zero_cost },                                           // ZERO
   { 0, 0, 0, 1, sign_cost },                                           // ONE
   { 0, 0, 0, 2, sign_cost },                                           // TWO
   { 0, 0, 0, 3, sign_cost },                                           // THREE
   { 0, 0, 0, 4, sign_cost },                                           // FOUR
-  { cat1_high10, vp10_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
-  { cat2_high10, vp10_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
-  { cat3_high10, vp10_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
-  { cat4_high10, vp10_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
-  { cat5_high10, vp10_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
-  { cat6_high10, vp10_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 },         // CAT6
+  { cat1_high10, av1_cat1_prob_high10, 1, CAT1_MIN_VAL, cat1_cost },   // CAT1
+  { cat2_high10, av1_cat2_prob_high10, 2, CAT2_MIN_VAL, cat2_cost },   // CAT2
+  { cat3_high10, av1_cat3_prob_high10, 3, CAT3_MIN_VAL, cat3_cost },   // CAT3
+  { cat4_high10, av1_cat4_prob_high10, 4, CAT4_MIN_VAL, cat4_cost },   // CAT4
+  { cat5_high10, av1_cat5_prob_high10, 5, CAT5_MIN_VAL, cat5_cost },   // CAT5
+  { cat6_high10, av1_cat6_prob_high10, 16, CAT6_MIN_VAL, 0 },          // CAT6
   { 0, 0, 0, 0, zero_cost }                                            // EOB
 };
-const vp10_extra_bit vp10_extra_bits_high12[ENTROPY_TOKENS] = {
+const av1_extra_bit av1_extra_bits_high12[ENTROPY_TOKENS] = {
   { 0, 0, 0, 0, zero_cost },                                           // ZERO
   { 0, 0, 0, 1, sign_cost },                                           // ONE
   { 0, 0, 0, 2, sign_cost },                                           // TWO
   { 0, 0, 0, 3, sign_cost },                                           // THREE
   { 0, 0, 0, 4, sign_cost },                                           // FOUR
-  { cat1_high12, vp10_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost },  // CAT1
-  { cat2_high12, vp10_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost },  // CAT2
-  { cat3_high12, vp10_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost },  // CAT3
-  { cat4_high12, vp10_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost },  // CAT4
-  { cat5_high12, vp10_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost },  // CAT5
-  { cat6_high12, vp10_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 },         // CAT6
+  { cat1_high12, av1_cat1_prob_high12, 1, CAT1_MIN_VAL, cat1_cost },   // CAT1
+  { cat2_high12, av1_cat2_prob_high12, 2, CAT2_MIN_VAL, cat2_cost },   // CAT2
+  { cat3_high12, av1_cat3_prob_high12, 3, CAT3_MIN_VAL, cat3_cost },   // CAT3
+  { cat4_high12, av1_cat4_prob_high12, 4, CAT4_MIN_VAL, cat4_cost },   // CAT4
+  { cat5_high12, av1_cat5_prob_high12, 5, CAT5_MIN_VAL, cat5_cost },   // CAT5
+  { cat6_high12, av1_cat6_prob_high12, 18, CAT6_MIN_VAL, 0 },          // CAT6
   { 0, 0, 0, 0, zero_cost }                                            // EOB
 };
 #endif
 
-const struct vp10_token vp10_coef_encodings[ENTROPY_TOKENS] = {
+const struct av1_token av1_coef_encodings[ENTROPY_TOKENS] = {
   { 2, 2 },  { 6, 3 },   { 28, 5 },  { 58, 6 },  { 59, 6 },  { 60, 6 },
   { 61, 6 }, { 124, 7 }, { 125, 7 }, { 126, 7 }, { 127, 7 }, { 0, 1 }
 };
 
 struct tokenize_b_args {
-  VP10_COMP *cpi;
+  AV1_COMP *cpi;
   ThreadData *td;
   TOKENEXTRA **tp;
 };
@@ -343,7 +343,7 @@ static void set_entropy_context_b(int plane, int block, int blk_row,
   MACROBLOCKD *const xd = &x->e_mbd;
   struct macroblock_plane *p = &x->plane[plane];
   struct macroblockd_plane *pd = &xd->plane[plane];
-  vp10_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
+  av1_set_contexts(xd, pd, plane_bsize, tx_size, p->eobs[block] > 0, blk_col,
                     blk_row);
 }
 
@@ -378,7 +378,7 @@ static INLINE int get_tx_eob(const struct segmentation *seg, int segment_id,
 static void tokenize_b(int plane, int block, int blk_row, int blk_col,
                        BLOCK_SIZE plane_bsize, TX_SIZE tx_size, void *arg) {
   struct tokenize_b_args *const args = arg;
-  VP10_COMP *cpi = args->cpi;
+  AV1_COMP *cpi = args->cpi;
   ThreadData *const td = args->td;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
@@ -431,13 +431,13 @@ static void tokenize_b(int plane, int block, int blk_row, int blk_col,
       v = qcoeff[scan[c]];
     }
 
-    vp10_get_token_extra(v, &token, &extra);
+    av1_get_token_extra(v, &token, &extra);
 
     add_token(&t, coef_probs[band[c]][pt], extra, (uint8_t)token,
               (uint8_t)skip_eob, counts[band[c]][pt]);
     eob_branch[band[c]][pt] += !skip_eob;
 
-    token_cache[scan[c]] = vp10_pt_energy_class[token];
+    token_cache[scan[c]] = av1_pt_energy_class[token];
     ++c;
     pt = get_coef_context(nb, token_cache, c);
   }
@@ -449,7 +449,7 @@ static void tokenize_b(int plane, int block, int blk_row, int blk_col,
 
   *tp = t;
 
-  vp10_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
+  av1_set_contexts(xd, pd, plane_bsize, tx_size, c > 0, blk_col, blk_row);
 }
 
 struct is_skippable_args {
@@ -468,11 +468,11 @@ static void is_skippable(int plane, int block, int blk_row, int blk_col,
 }
 
 // TODO(yaowu): rewrite and optimize this function to remove the usage of
-//              vp10_foreach_transform_block() and simplify is_skippable().
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+//              av1_foreach_transform_block() and simplify is_skippable().
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   int result = 1;
   struct is_skippable_args args = { x->plane[plane].eobs, &result };
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
+  av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane, is_skippable,
                                           &args);
   return result;
 }
@@ -490,21 +490,21 @@ static void has_high_freq_coeff(int plane, int block, int blk_row, int blk_col,
   *(args->skippable) |= (args->eobs[block] > eobs);
 }
 
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane) {
   int result = 0;
   struct is_skippable_args args = { x->plane[plane].eobs, &result };
-  vp10_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
+  av1_foreach_transformed_block_in_plane(&x->e_mbd, bsize, plane,
                                           has_high_freq_coeff, &args);
   return result;
 }
 
-void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
+void av1_tokenize_sb(AV1_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
                       int dry_run, BLOCK_SIZE bsize) {
-  VP10_COMMON *const cm = &cpi->common;
+  AV1_COMMON *const cm = &cpi->common;
   MACROBLOCK *const x = &td->mb;
   MACROBLOCKD *const xd = &x->e_mbd;
   MB_MODE_INFO *const mbmi = &xd->mi[0]->mbmi;
-  const int ctx = vp10_get_skip_context(xd);
+  const int ctx = av1_get_skip_context(xd);
   const int skip_inc =
       !segfeature_active(&cm->seg, mbmi->segment_id, SEG_LVL_SKIP);
   struct tokenize_b_args arg = { cpi, td, t };
@@ -520,12 +520,12 @@ void vp10_tokenize_sb(VP10_COMP *cpi, ThreadData *td, TOKENEXTRA **t,
     td->counts->skip[ctx][0] += skip_inc;
 
     for (plane = 0; plane < MAX_MB_PLANE; ++plane) {
-      vp10_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
+      av1_foreach_transformed_block_in_plane(xd, bsize, plane, tokenize_b,
                                               &arg);
       (*t)->token = EOSB_TOKEN;
       (*t)++;
     }
   } else {
-    vp10_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
+    av1_foreach_transformed_block(xd, bsize, set_entropy_context_b, &arg);
   }
 }
diff --git a/av1/encoder/tokenize.h b/av1/encoder/tokenize.h
index a7a37cb211bc5161db8d3d8e25dcc476f684a27b..3ab8193a374e4e04add1e3490127148e143414f8 100644
--- a/av1/encoder/tokenize.h
+++ b/av1/encoder/tokenize.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_TOKENIZE_H_
-#define VP10_ENCODER_TOKENIZE_H_
+#ifndef AV1_ENCODER_TOKENIZE_H_
+#define AV1_ENCODER_TOKENIZE_H_
 
 #include "av1/common/entropy.h"
 
@@ -23,7 +23,7 @@ extern "C" {
 
 #define EOSB_TOKEN 127  // Not signalled, encoder only
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef int32_t EXTRABIT;
 #else
 typedef int16_t EXTRABIT;
@@ -41,52 +41,52 @@ typedef struct {
   uint8_t skip_eob_node;
 } TOKENEXTRA;
 
-extern const aom_tree_index vp10_coef_tree[];
-extern const aom_tree_index vp10_coef_con_tree[];
-extern const struct vp10_token vp10_coef_encodings[];
+extern const aom_tree_index av1_coef_tree[];
+extern const aom_tree_index av1_coef_con_tree[];
+extern const struct av1_token av1_coef_encodings[];
 
-int vp10_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
-int vp10_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_is_skippable_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
+int av1_has_high_freq_in_plane(MACROBLOCK *x, BLOCK_SIZE bsize, int plane);
 
-struct VP10_COMP;
+struct AV1_COMP;
 struct ThreadData;
 
-void vp10_tokenize_sb(struct VP10_COMP *cpi, struct ThreadData *td,
+void av1_tokenize_sb(struct AV1_COMP *cpi, struct ThreadData *td,
                       TOKENEXTRA **t, int dry_run, BLOCK_SIZE bsize);
 
-extern const int16_t *vp10_dct_value_cost_ptr;
+extern const int16_t *av1_dct_value_cost_ptr;
 /* TODO: The Token field should be broken out into a separate char array to
  *  improve cache locality, since it's needed for costing when the rest of the
  *  fields are not.
  */
-extern const TOKENVALUE *vp10_dct_value_tokens_ptr;
-extern const TOKENVALUE *vp10_dct_cat_lt_10_value_tokens;
-extern const int16_t vp10_cat6_low_cost[256];
-extern const int vp10_cat6_high_cost[64];
-extern const int vp10_cat6_high10_high_cost[256];
-extern const int vp10_cat6_high12_high_cost[1024];
-static INLINE int vp10_get_cost(int16_t token, EXTRABIT extrabits,
+extern const TOKENVALUE *av1_dct_value_tokens_ptr;
+extern const TOKENVALUE *av1_dct_cat_lt_10_value_tokens;
+extern const int16_t av1_cat6_low_cost[256];
+extern const int av1_cat6_high_cost[64];
+extern const int av1_cat6_high10_high_cost[256];
+extern const int av1_cat6_high12_high_cost[1024];
+static INLINE int av1_get_cost(int16_t token, EXTRABIT extrabits,
                                 const int *cat6_high_table) {
   if (token != CATEGORY6_TOKEN)
-    return vp10_extra_bits[token].cost[extrabits >> 1];
-  return vp10_cat6_low_cost[(extrabits >> 1) & 0xff] +
+    return av1_extra_bits[token].cost[extrabits >> 1];
+  return av1_cat6_low_cost[(extrabits >> 1) & 0xff] +
          cat6_high_table[extrabits >> 9];
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
-  return bit_depth == 8 ? vp10_cat6_high_cost
-                        : (bit_depth == 10 ? vp10_cat6_high10_high_cost
-                                           : vp10_cat6_high12_high_cost);
+#if CONFIG_AOM_HIGHBITDEPTH
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
+  return bit_depth == 8 ? av1_cat6_high_cost
+                        : (bit_depth == 10 ? av1_cat6_high10_high_cost
+                                           : av1_cat6_high12_high_cost);
 }
 #else
-static INLINE const int *vp10_get_high_cost_table(int bit_depth) {
+static INLINE const int *av1_get_high_cost_table(int bit_depth) {
   (void)bit_depth;
-  return vp10_cat6_high_cost;
+  return av1_cat6_high_cost;
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-static INLINE void vp10_get_token_extra(int v, int16_t *token,
+static INLINE void av1_get_token_extra(int v, int16_t *token,
                                         EXTRABIT *extra) {
   if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) {
     *token = CATEGORY6_TOKEN;
@@ -96,16 +96,16 @@ static INLINE void vp10_get_token_extra(int v, int16_t *token,
       *extra = -2 * v - 2 * CAT6_MIN_VAL + 1;
     return;
   }
-  *token = vp10_dct_cat_lt_10_value_tokens[v].token;
-  *extra = vp10_dct_cat_lt_10_value_tokens[v].extra;
+  *token = av1_dct_cat_lt_10_value_tokens[v].token;
+  *extra = av1_dct_cat_lt_10_value_tokens[v].extra;
 }
-static INLINE int16_t vp10_get_token(int v) {
+static INLINE int16_t av1_get_token(int v) {
   if (v >= CAT6_MIN_VAL || v <= -CAT6_MIN_VAL) return 10;
-  return vp10_dct_cat_lt_10_value_tokens[v].token;
+  return av1_dct_cat_lt_10_value_tokens[v].token;
 }
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_TOKENIZE_H_
+#endif  // AV1_ENCODER_TOKENIZE_H_
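
For orientation, a small sketch of how the renamed inline helpers in tokenize.h combine to cost a single coefficient value — illustrative only; the function name, input value, and bit depth are arbitrary and not taken from the patch:

```c
#include "av1/encoder/tokenize.h"

/* Sketch: cost one quantized coefficient value v with the renamed helpers.
 * av1_get_token_extra() splits v into a token plus extra bits, and
 * av1_get_cost() looks the pair up in the per-bit-depth cost tables selected
 * by av1_get_high_cost_table(). */
static int coeff_cost(int v, int bit_depth) {
  int16_t token;
  EXTRABIT extra;
  const int *high_table = av1_get_high_cost_table(bit_depth);
  av1_get_token_extra(v, &token, &extra);
  return av1_get_cost(token, extra, high_table);
}
```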
diff --git a/av1/encoder/treewriter.c b/av1/encoder/treewriter.c
index e1b98871bab7871a1ec162bed22efdd8e6e614dd..7c35b564af252bc08769920ab6d50f1c5f8944dd 100644
--- a/av1/encoder/treewriter.c
+++ b/av1/encoder/treewriter.c
@@ -11,7 +11,7 @@
 
 #include "av1/encoder/treewriter.h"
 
-static void tree2tok(struct vp10_token *tokens, const aom_tree_index *tree,
+static void tree2tok(struct av1_token *tokens, const aom_tree_index *tree,
                      int i, int v, int l) {
   v += v;
   ++l;
@@ -27,7 +27,7 @@ static void tree2tok(struct vp10_token *tokens, const aom_tree_index *tree,
   } while (++v & 1);
 }
 
-void vp10_tokens_from_tree(struct vp10_token *tokens,
+void av1_tokens_from_tree(struct av1_token *tokens,
                            const aom_tree_index *tree) {
   tree2tok(tokens, tree, 0, 0, 0);
 }
@@ -52,7 +52,7 @@ static unsigned int convert_distribution(unsigned int i, aom_tree tree,
   return left + right;
 }
 
-void vp10_tree_probs_from_distribution(aom_tree tree,
+void av1_tree_probs_from_distribution(aom_tree tree,
                                        unsigned int branch_ct[/* n-1 */][2],
                                        const unsigned int num_events[/* n */]) {
   convert_distribution(0, tree, branch_ct, num_events);
diff --git a/av1/encoder/treewriter.h b/av1/encoder/treewriter.h
index 306bd4a92286e13c50c4bd52ece36163e7e4ecea..38ab5a6a25286ee371f2b68a37603ab51227fd94 100644
--- a/av1/encoder/treewriter.h
+++ b/av1/encoder/treewriter.h
@@ -9,8 +9,8 @@
  * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
  */
 
-#ifndef VP10_ENCODER_TREEWRITER_H_
-#define VP10_ENCODER_TREEWRITER_H_
+#ifndef AV1_ENCODER_TREEWRITER_H_
+#define AV1_ENCODER_TREEWRITER_H_
 
 #include "aom_dsp/bitwriter.h"
 
@@ -18,18 +18,18 @@
 extern "C" {
 #endif
 
-void vp10_tree_probs_from_distribution(aom_tree tree,
+void av1_tree_probs_from_distribution(aom_tree tree,
                                        unsigned int branch_ct[/* n - 1 */][2],
                                        const unsigned int num_events[/* n */]);
 
-struct vp10_token {
+struct av1_token {
   int value;
   int len;
 };
 
-void vp10_tokens_from_tree(struct vp10_token *, const aom_tree_index *);
+void av1_tokens_from_tree(struct av1_token *, const aom_tree_index *);
 
-static INLINE void vp10_write_tree(aom_writer *w, const aom_tree_index *tree,
+static INLINE void av1_write_tree(aom_writer *w, const aom_tree_index *tree,
                                    const aom_prob *probs, int bits, int len,
                                    aom_tree_index i) {
   do {
@@ -39,14 +39,14 @@ static INLINE void vp10_write_tree(aom_writer *w, const aom_tree_index *tree,
   } while (len);
 }
 
-static INLINE void vp10_write_token(aom_writer *w, const aom_tree_index *tree,
+static INLINE void av1_write_token(aom_writer *w, const aom_tree_index *tree,
                                     const aom_prob *probs,
-                                    const struct vp10_token *token) {
-  vp10_write_tree(w, tree, probs, token->value, token->len, 0);
+                                    const struct av1_token *token) {
+  av1_write_tree(w, tree, probs, token->value, token->len, 0);
 }
 
 #ifdef __cplusplus
 }  // extern "C"
 #endif
 
-#endif  // VP10_ENCODER_TREEWRITER_H_
+#endif  // AV1_ENCODER_TREEWRITER_H_
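
A companion sketch for the renamed tree-writer helpers, using the coefficient tree and encodings declared in tokenize.h — illustrative only; the helper name is hypothetical, `probs` is assumed to be the node-probability vector for the current coefficient context, and the choice of ONE_TOKEN is arbitrary:

```c
#include "av1/encoder/tokenize.h"
#include "av1/encoder/treewriter.h"

/* Sketch: emit one coefficient token through av1_write_token(), which walks
 * av1_coef_tree using the precomputed value/length pair from
 * av1_coef_encodings. */
static void write_one_coef_token(aom_writer *w, const aom_prob *probs) {
  av1_write_token(w, av1_coef_tree, probs, &av1_coef_encodings[ONE_TOKEN]);
}
```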
diff --git a/av1/encoder/x86/dct_mmx.asm b/av1/encoder/x86/dct_mmx.asm
index 34ce315310d76f07caa96ed383a930f7f0e58bfc..2154300f36f4835d948ecafbb29b754117c28ddd 100644
--- a/av1/encoder/x86/dct_mmx.asm
+++ b/av1/encoder/x86/dct_mmx.asm
@@ -8,7 +8,7 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vp10
+%define private_prefix av1
 
 %include "third_party/x86inc/x86inc.asm"
 
@@ -65,7 +65,7 @@ cglobal fwht4x4, 3, 4, 8, input, output, stride
   psllw           m2,        2
   psllw           m3,        2
 
-%if CONFIG_VPX_HIGHBITDEPTH
+%if CONFIG_AOM_HIGHBITDEPTH
   pxor            m4,             m4
   pxor            m5,             m5
   pcmpgtw         m4,             m0
diff --git a/av1/encoder/x86/dct_sse2.c b/av1/encoder/x86/dct_sse2.c
index 54a9d2daeef0768d29527e0e0c183b3aa7e638fd..693fed593bef1c8576f17521dd5d6a5771bbf177 100644
--- a/av1/encoder/x86/dct_sse2.c
+++ b/av1/encoder/x86/dct_sse2.c
@@ -152,7 +152,7 @@ static void fadst4_sse2(__m128i *in) {
   transpose_4x4(in);
 }
 
-void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
                       int tx_type) {
   __m128i in[4];
 
@@ -180,7 +180,7 @@ void vp10_fht4x4_sse2(const int16_t *input, tran_low_t *output, int stride,
   }
 }
 
-void vp10_fdct8x8_quant_sse2(
+void av1_fdct8x8_quant_sse2(
     const int16_t *input, int stride, int16_t *coeff_ptr, intptr_t n_coeffs,
     int skip_block, const int16_t *zbin_ptr, const int16_t *round_ptr,
     const int16_t *quant_ptr, const int16_t *quant_shift_ptr,
@@ -1129,7 +1129,7 @@ static void fadst8_sse2(__m128i *in) {
   array_transpose_8x8(in, in);
 }
 
-void vp10_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht8x8_sse2(const int16_t *input, tran_low_t *output, int stride,
                       int tx_type) {
   __m128i in[8];
 
@@ -2012,7 +2012,7 @@ static void fadst16_sse2(__m128i *in0, __m128i *in1) {
   array_transpose_16x16(in0, in1);
 }
 
-void vp10_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
+void av1_fht16x16_sse2(const int16_t *input, tran_low_t *output, int stride,
                         int tx_type) {
   __m128i in0[16], in1[16];
 
diff --git a/av1/encoder/x86/dct_ssse3.c b/av1/encoder/x86/dct_ssse3.c
index 91cbec263229f84b9f12038d1ae836939e817b06..0b5ae83a518f00b6a049283209e71d4f4eba6b80 100644
--- a/av1/encoder/x86/dct_ssse3.c
+++ b/av1/encoder/x86/dct_ssse3.c
@@ -21,7 +21,7 @@
 #include "aom_dsp/x86/inv_txfm_sse2.h"
 #include "aom_dsp/x86/txfm_common_sse2.h"
 
-void vp10_fdct8x8_quant_ssse3(
+void av1_fdct8x8_quant_ssse3(
     const int16_t* input, int stride, int16_t* coeff_ptr, intptr_t n_coeffs,
     int skip_block, const int16_t* zbin_ptr, const int16_t* round_ptr,
     const int16_t* quant_ptr, const int16_t* quant_shift_ptr,
diff --git a/av1/encoder/x86/error_intrin_avx2.c b/av1/encoder/x86/error_intrin_avx2.c
index 5f60c3c71c7bc07ca0fa010b48353aaae3c2fa5a..c1d889969f4b5b778416b6bd7c5ae58dde641bf4 100644
--- a/av1/encoder/x86/error_intrin_avx2.c
+++ b/av1/encoder/x86/error_intrin_avx2.c
@@ -14,7 +14,7 @@
 #include "./av1_rtcd.h"
 #include "aom/aom_integer.h"
 
-int64_t vp10_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
+int64_t av1_block_error_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                               intptr_t block_size, int64_t *ssz) {
   __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
   __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
diff --git a/av1/encoder/x86/error_sse2.asm b/av1/encoder/x86/error_sse2.asm
index 0772da418e9649cc01ec4aabacfaaf724cce25e7..44a52d7cc7e85be8d1c20b522085605b9d9b09f3 100644
--- a/av1/encoder/x86/error_sse2.asm
+++ b/av1/encoder/x86/error_sse2.asm
@@ -8,13 +8,13 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vp10
+%define private_prefix av1
 
 %include "third_party/x86inc/x86inc.asm"
 
 SECTION .text
 
-; int64_t vp10_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+; int64_t av1_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
 ;                         int64_t *ssz)
 
 INIT_XMM sse2
@@ -76,7 +76,7 @@ cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz
   RET
 
 ; Compute the sum of squared difference between two int16_t vectors.
-; int64_t vp10_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
+; int64_t av1_block_error_fp(int16_t *coeff, int16_t *dqcoeff,
 ;                            intptr_t block_size)
 
 INIT_XMM sse2
diff --git a/av1/encoder/x86/highbd_block_error_intrin_sse2.c b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
index 3a4a47a068da1278e53a2af21ccd531910cb88fe..e105d54fb50edaf2a653504c39a173fe2c25e1c6 100644
--- a/av1/encoder/x86/highbd_block_error_intrin_sse2.c
+++ b/av1/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -14,7 +14,7 @@
 
 #include "av1/common/common.h"
 
-int64_t vp10_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
+int64_t av1_highbd_block_error_sse2(tran_low_t *coeff, tran_low_t *dqcoeff,
                                      intptr_t block_size, int64_t *ssz,
                                      int bps) {
   int i, j, test;
diff --git a/av1/encoder/x86/quantize_sse2.c b/av1/encoder/x86/quantize_sse2.c
index 44b44d04d8c5ab7efcef58bf24f1b99dcd2741ad..50eee353c179e2b383154f495481cadf3a6f1332 100644
--- a/av1/encoder/x86/quantize_sse2.c
+++ b/av1/encoder/x86/quantize_sse2.c
@@ -15,7 +15,7 @@
 #include "./av1_rtcd.h"
 #include "aom/aom_integer.h"
 
-void vp10_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
+void av1_quantize_fp_sse2(const int16_t* coeff_ptr, intptr_t n_coeffs,
                            int skip_block, const int16_t* zbin_ptr,
                            const int16_t* round_ptr, const int16_t* quant_ptr,
                            const int16_t* quant_shift_ptr, int16_t* qcoeff_ptr,
diff --git a/av1/encoder/x86/quantize_ssse3_x86_64.asm b/av1/encoder/x86/quantize_ssse3_x86_64.asm
index b8fefa2f164fc81d166acde4cff827be867c6dba..05e0be675e42d6c25b1a162caa61b8119d394190 100644
--- a/av1/encoder/x86/quantize_ssse3_x86_64.asm
+++ b/av1/encoder/x86/quantize_ssse3_x86_64.asm
@@ -8,7 +8,7 @@
 ;  be found in the AUTHORS file in the root of the source tree.
 ;
 
-%define private_prefix vp10
+%define private_prefix av1
 
 %include "third_party/x86inc/x86inc.asm"
 
diff --git a/av1/encoder/x86/ssim_opt_x86_64.asm b/av1/encoder/x86/ssim_opt_x86_64.asm
index 29659eedf033355eb637f1049d63a85187f27c8c..4b5c450d321ed8a7957b4e9fe29d9bdc41328e46 100644
--- a/av1/encoder/x86/ssim_opt_x86_64.asm
+++ b/av1/encoder/x86/ssim_opt_x86_64.asm
@@ -61,8 +61,8 @@
 ; or pavgb At this point this is just meant to be first pass for calculating
 ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
 ; in mode selection code.
-global sym(vp10_ssim_parms_16x16_sse2) PRIVATE
-sym(vp10_ssim_parms_16x16_sse2):
+global sym(av1_ssim_parms_16x16_sse2) PRIVATE
+sym(av1_ssim_parms_16x16_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 9
@@ -151,8 +151,8 @@ sym(vp10_ssim_parms_16x16_sse2):
 ; or pavgb At this point this is just meant to be first pass for calculating
 ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
 ; in mode selection code.
-global sym(vp10_ssim_parms_8x8_sse2) PRIVATE
-sym(vp10_ssim_parms_8x8_sse2):
+global sym(av1_ssim_parms_8x8_sse2) PRIVATE
+sym(av1_ssim_parms_8x8_sse2):
     push        rbp
     mov         rbp, rsp
     SHADOW_ARGS_TO_STACK 9
diff --git a/av1/encoder/x86/temporal_filter_apply_sse2.asm b/av1/encoder/x86/temporal_filter_apply_sse2.asm
index eabe5756401299b7d6feb9856031409b391b349d..15de6e88d101c7133ed458b9ed356a7ff305818a 100644
--- a/av1/encoder/x86/temporal_filter_apply_sse2.asm
+++ b/av1/encoder/x86/temporal_filter_apply_sse2.asm
@@ -11,7 +11,7 @@
 
 %include "aom_ports/x86_abi_support.asm"
 
-; void vp10_temporal_filter_apply_sse2 | arg
+; void av1_temporal_filter_apply_sse2 | arg
 ;  (unsigned char  *frame1,           |  0
 ;   unsigned int    stride,           |  1
 ;   unsigned char  *frame2,           |  2
@@ -21,8 +21,8 @@
 ;   int             filter_weight,    |  6
 ;   unsigned int   *accumulator,      |  7
 ;   unsigned short *count)            |  8
-global sym(vp10_temporal_filter_apply_sse2) PRIVATE
-sym(vp10_temporal_filter_apply_sse2):
+global sym(av1_temporal_filter_apply_sse2) PRIVATE
+sym(av1_temporal_filter_apply_sse2):
 
     push        rbp
     mov         rbp, rsp
diff --git a/av1/exports_dec b/av1/exports_dec
index 67f427fd7feafb63f3801bc30b0af4cbe1f7e57e..05860e8c0a0554981b0ac32f9282e4cb16f245f2 100644
--- a/av1/exports_dec
+++ b/av1/exports_dec
@@ -1,2 +1,2 @@
-data aom_codec_vp10_dx_algo
-text aom_codec_vp10_dx
+data aom_codec_av1_dx_algo
+text aom_codec_av1_dx
diff --git a/av1/exports_enc b/av1/exports_enc
index f55fef9aaa986c217a1d2a17412f9f07180f6d16..dc4a9eae79042c7ce4a390829f8456f2451dc3ea 100644
--- a/av1/exports_enc
+++ b/av1/exports_enc
@@ -1,2 +1,2 @@
-data aom_codec_vp10_cx_algo
-text aom_codec_vp10_cx
+data aom_codec_av1_cx_algo
+text aom_codec_av1_cx
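
Since only the exported symbol names change here, client code keeps the usual bring-up path; a minimal sketch against the renamed encoder interface — illustrative only; the header paths, the wrapper name, and the reduced error handling are assumptions, not part of the patch:

```c
#include "aom/aom_encoder.h"
#include "aom/vp8cx.h"  /* declares aom_codec_av1_cx() after the rename */

/* Sketch: open an AV1 encoder through the renamed public interface. */
static int open_av1_encoder(aom_codec_ctx_t *codec, int width, int height) {
  aom_codec_enc_cfg_t cfg;
  if (aom_codec_enc_config_default(aom_codec_av1_cx(), &cfg, 0)) return -1;
  cfg.g_w = width;
  cfg.g_h = height;
  /* Flags stay 0 for 8-bit input; VPX_CODEC_USE_HIGHBITDEPTH for 10/12-bit. */
  return aom_codec_enc_init(codec, aom_codec_av1_cx(), &cfg, 0) ? -1 : 0;
}
```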
diff --git a/build/make/Android.mk b/build/make/Android.mk
index 0c229c43fb2f73f58bc2e7837ba15cfae2e941fa..290c974f8eb084bfadc3e5e5434db9652b53a3bf 100644
--- a/build/make/Android.mk
+++ b/build/make/Android.mk
@@ -168,7 +168,7 @@ endif
 define rtcd_dep_template
 rtcd_dep_template_SRCS := $(addprefix $(LOCAL_PATH)/, $(LOCAL_SRC_FILES))
 rtcd_dep_template_SRCS := $$(rtcd_dep_template_SRCS:.neon=)
-ifeq ($(CONFIG_VP10), yes)
+ifeq ($(CONFIG_AV1), yes)
 $$(rtcd_dep_template_SRCS): av1_rtcd.h
 endif
 $$(rtcd_dep_template_SRCS): aom_scale_rtcd.h
diff --git a/configure b/configure
index 17f5b1096fdce67a6e81a0625919f3fcfb4d9c90..3bd2a908a34305e3c61f3df9da51fd18959276ff 100755
--- a/configure
+++ b/configure
@@ -35,7 +35,7 @@ Advanced options:
   ${toggle_debug_libs}            in/exclude debug version of libraries
   ${toggle_static_msvcrt}         use static MSVCRT (VS builds only)
   ${toggle_aom_highbitdepth}      use high bit depth (10/12) profiles
-  ${toggle_vp10}                  VP10 codec support
+  ${toggle_av1}                   AV1 codec support
   ${toggle_internal_stats}        output of encoder internal stats for debug, if supported (encoders)
   ${toggle_multithread}           multithreaded encoding and decoding
   ${toggle_spatial_resampling}    spatial sampling (scaling) support
@@ -185,7 +185,7 @@ if [ ${doxy_major:-0} -ge 1 ]; then
 fi
 
 # disable codecs when their source directory does not exist
-[ -d "${source_path}/av1" ] || disable_feature vp10
+[ -d "${source_path}/av1" ] || disable_feature av1
 
 # install everything except the sources, by default. sources will have
 # to be enabled when doing dist builds, since that's no longer a common
@@ -203,11 +203,11 @@ enable_feature os_support
 enable_feature temporal_denoising
 
 CODECS="
-    vp10_encoder
-    vp10_decoder
+    av1_encoder
+    av1_decoder
 "
 CODEC_FAMILIES="
-    vp10
+    av1
 "
 
 ARCH_LIST="
diff --git a/examples.mk b/examples.mk
index c3c203e401ee93c45fc577846579f10e4372b619..9c3fb3dee3d992a16f3d895a283864b34b953c01 100644
--- a/examples.mk
+++ b/examples.mk
@@ -168,10 +168,10 @@ set_maps.DESCRIPTION                = Set active and ROI maps
 # We should not link to math library (libm) on RVCT
 # when building for bare-metal targets
 ifeq ($(CONFIG_OS_SUPPORT), yes)
-CODEC_EXTRA_LIBS-$(CONFIG_VP10)            += m
+CODEC_EXTRA_LIBS-$(CONFIG_AV1)            += m
 else
     ifeq ($(CONFIG_GCC), yes)
-    CODEC_EXTRA_LIBS-$(CONFIG_VP10)        += m
+    CODEC_EXTRA_LIBS-$(CONFIG_AV1)        += m
     endif
 endif
 #
@@ -188,8 +188,8 @@ ifeq ($(HAVE_ALT_TREE_LAYOUT),yes)
     INC_PATH-yes := $(SRC_PATH_BARE)/../include
 else
     LIB_PATH-yes                     += $(if $(BUILD_PFX),$(BUILD_PFX),.)
-    INC_PATH-$(CONFIG_VP10_DECODER)   += $(SRC_PATH_BARE)/av1
-    INC_PATH-$(CONFIG_VP10_ENCODER)   += $(SRC_PATH_BARE)/av1
+    INC_PATH-$(CONFIG_AV1_DECODER)   += $(SRC_PATH_BARE)/av1
+    INC_PATH-$(CONFIG_AV1_ENCODER)   += $(SRC_PATH_BARE)/av1
 endif
 INC_PATH-$(CONFIG_LIBYUV) += $(SRC_PATH_BARE)/third_party/libyuv/include
 LIB_PATH := $(call enabled,LIB_PATH)
diff --git a/examples/aom_temporal_svc_encoder.c b/examples/aom_temporal_svc_encoder.c
index 3e473d7837fbd879d83bb2cc3334967eb1025e71..bac8fb4c5c33c5d005b30805d0e406f1910adce5 100644
--- a/examples/aom_temporal_svc_encoder.c
+++ b/examples/aom_temporal_svc_encoder.c
@@ -491,13 +491,13 @@ int main(int argc, char **argv) {
   struct RateControlMetrics rc;
   int64_t cx_time = 0;
   const int min_args_base = 11;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   aom_bit_depth_t bit_depth = VPX_BITS_8;
   int input_bit_depth = 8;
   const int min_args = min_args_base + 1;
 #else
   const int min_args = min_args_base;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   double sum_bitrate = 0.0;
   double sum_bitrate2 = 0.0;
   double framerate = 30.0;
@@ -505,7 +505,7 @@ int main(int argc, char **argv) {
   exec_name = argv[0];
   // Check usage and arguments.
   if (argc < min_args) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     die(
         "Usage: %s <infile> <outfile> <codec_type(vp8/vp9)> <width> <height> "
         "<rate_num> <rate_den> <speed> <frame_drop_threshold> <mode> "
@@ -517,7 +517,7 @@ int main(int argc, char **argv) {
         "<rate_num> <rate_den> <speed> <frame_drop_threshold> <mode> "
         "<Rate_0> ... <Rate_nlayers-1> \n",
         argv[0]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
 
   encoder = get_aom_encoder_by_name(argv[3]);
@@ -540,7 +540,7 @@ int main(int argc, char **argv) {
     die("Invalid number of arguments");
   }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   switch (strtol(argv[argc - 1], NULL, 0)) {
     case 8:
       bit_depth = VPX_BITS_8;
@@ -565,7 +565,7 @@ int main(int argc, char **argv) {
   if (!aom_img_alloc(&raw, VPX_IMG_FMT_I420, width, height, 32)) {
     die("Failed to allocate image", width, height);
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Populate encoder configuration.
   res = aom_codec_enc_config_default(encoder->codec_interface(), &cfg, 0);
@@ -578,13 +578,13 @@ int main(int argc, char **argv) {
   cfg.g_w = width;
   cfg.g_h = height;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (bit_depth != VPX_BITS_8) {
     cfg.g_bit_depth = bit_depth;
     cfg.g_input_bit_depth = input_bit_depth;
     cfg.g_profile = 2;
   }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
   // Timebase format e.g. 30fps: numerator=1, demoninator = 30.
   cfg.g_timebase.num = strtol(argv[6], NULL, 0);
@@ -667,13 +667,13 @@ int main(int argc, char **argv) {
   cfg.ss_number_layers = 1;
 
 // Initialize codec.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   if (aom_codec_enc_init(
           &codec, encoder->codec_interface(), &cfg,
           bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH))
 #else
   if (aom_codec_enc_init(&codec, encoder->codec_interface(), &cfg, 0))
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     die_codec(&codec, "Failed to initialize encoder");
 
   if (strncmp(encoder->name, "vp8", 3) == 0) {
diff --git a/libs.mk b/libs.mk
index bee92043dbaa18dcad33965fffe9c3392b54129b..194d8ea66df0bb1053cf41a71954632fa3fb9194 100644
--- a/libs.mk
+++ b/libs.mk
@@ -53,39 +53,39 @@ CODEC_SRCS-yes += $(addprefix aom_dsp/,$(call enabled,DSP_SRCS))
 include $(SRC_PATH_BARE)/aom_util/aom_util.mk
 CODEC_SRCS-yes += $(addprefix aom_util/,$(call enabled,UTIL_SRCS))
 
-#  VP10 make file
-ifeq ($(CONFIG_VP10),yes)
-  VP10_PREFIX=av1/
-  include $(SRC_PATH_BARE)/$(VP10_PREFIX)av1_common.mk
+#  AV1 make file
+ifeq ($(CONFIG_AV1),yes)
+  AV1_PREFIX=av1/
+  include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_common.mk
 endif
 
-ifeq ($(CONFIG_VP10_ENCODER),yes)
-  VP10_PREFIX=av1/
-  include $(SRC_PATH_BARE)/$(VP10_PREFIX)av1_cx.mk
-  CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_CX_SRCS))
-  CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_CX_EXPORTS))
-  CODEC_SRCS-yes += $(VP10_PREFIX)av1_cx.mk aom/vp8.h aom/vp8cx.h
+ifeq ($(CONFIG_AV1_ENCODER),yes)
+  AV1_PREFIX=av1/
+  include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_cx.mk
+  CODEC_SRCS-yes += $(addprefix $(AV1_PREFIX),$(call enabled,AV1_CX_SRCS))
+  CODEC_EXPORTS-yes += $(addprefix $(AV1_PREFIX),$(AV1_CX_EXPORTS))
+  CODEC_SRCS-yes += $(AV1_PREFIX)av1_cx.mk aom/vp8.h aom/vp8cx.h
   INSTALL-LIBS-yes += include/aom/vp8.h include/aom/vp8cx.h
   INSTALL-LIBS-$(CONFIG_SPATIAL_SVC) += include/aom/svc_context.h
-  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
+  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(AV1_PREFIX)/%
   CODEC_DOC_SRCS += aom/vp8.h aom/vp8cx.h
-  CODEC_DOC_SECTIONS += vp10 vp10_encoder
+  CODEC_DOC_SECTIONS += av1 av1_encoder
 endif
 
-ifeq ($(CONFIG_VP10_DECODER),yes)
-  VP10_PREFIX=av1/
-  include $(SRC_PATH_BARE)/$(VP10_PREFIX)av1_dx.mk
-  CODEC_SRCS-yes += $(addprefix $(VP10_PREFIX),$(call enabled,VP10_DX_SRCS))
-  CODEC_EXPORTS-yes += $(addprefix $(VP10_PREFIX),$(VP10_DX_EXPORTS))
-  CODEC_SRCS-yes += $(VP10_PREFIX)av1_dx.mk aom/vp8.h aom/vp8dx.h
+ifeq ($(CONFIG_AV1_DECODER),yes)
+  AV1_PREFIX=av1/
+  include $(SRC_PATH_BARE)/$(AV1_PREFIX)av1_dx.mk
+  CODEC_SRCS-yes += $(addprefix $(AV1_PREFIX),$(call enabled,AV1_DX_SRCS))
+  CODEC_EXPORTS-yes += $(addprefix $(AV1_PREFIX),$(AV1_DX_EXPORTS))
+  CODEC_SRCS-yes += $(AV1_PREFIX)av1_dx.mk aom/vp8.h aom/vp8dx.h
   INSTALL-LIBS-yes += include/aom/vp8.h include/aom/vp8dx.h
-  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(VP10_PREFIX)/%
+  INSTALL_MAPS += include/aom/% $(SRC_PATH_BARE)/$(AV1_PREFIX)/%
   CODEC_DOC_SRCS += aom/vp8.h aom/vp8dx.h
-  CODEC_DOC_SECTIONS += vp10 vp10_decoder
+  CODEC_DOC_SECTIONS += av1 av1_decoder
 endif
 
-VP10_PREFIX=av1/
-$(BUILD_PFX)$(VP10_PREFIX)%.c.o: CFLAGS += -Wextra
+AV1_PREFIX=av1/
+$(BUILD_PFX)$(AV1_PREFIX)%.c.o: CFLAGS += -Wextra
 
 ifeq ($(CONFIG_ENCODERS),yes)
   CODEC_DOC_SECTIONS += encoder
diff --git a/test/active_map_refresh_test.cc b/test/active_map_refresh_test.cc
index a95c6644b63bfe36274f28fa7791f7b8189c7f91..f53ffc8c6c4d64e562a93ae970863281369a4eb2 100644
--- a/test/active_map_refresh_test.cc
+++ b/test/active_map_refresh_test.cc
@@ -121,7 +121,7 @@ TEST_P(ActiveMapRefreshTest, Test) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
+AV1_INSTANTIATE_TEST_CASE(ActiveMapRefreshTest,
                            ::testing::Values(::libaom_test::kRealTime),
                            ::testing::Range(5, 6));
 }  // namespace
diff --git a/test/active_map_test.cc b/test/active_map_test.cc
index d633dc393210a84321af6549e41ccf32cffe3de2..d371f3d6bee559fddf10e4d3b027d05566d33695 100644
--- a/test/active_map_test.cc
+++ b/test/active_map_test.cc
@@ -79,7 +79,7 @@ TEST_P(ActiveMapTest, Test) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(ActiveMapTest,
+AV1_INSTANTIATE_TEST_CASE(ActiveMapTest,
                            ::testing::Values(::libaom_test::kRealTime),
                            ::testing::Range(0, 6));
 }  // namespace
diff --git a/test/aq_segment_test.cc b/test/aq_segment_test.cc
index e7b8ade205cfb72e8bb53295f960961c5a314b0e..25863aae824c388d4c9ec7774f1293fb507b92b9 100644
--- a/test/aq_segment_test.cc
+++ b/test/aq_segment_test.cc
@@ -102,7 +102,7 @@ TEST_P(AqSegmentTest, TestNoMisMatchAQ3) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(AqSegmentTest,
+AV1_INSTANTIATE_TEST_CASE(AqSegmentTest,
                            ::testing::Values(::libaom_test::kRealTime,
                                              ::libaom_test::kOnePassGood),
                            ::testing::Range(3, 9));
diff --git a/test/arf_freq_test.cc b/test/arf_freq_test.cc
index 9ba1e3bd0ba0c93947b22d2fe2438c453748ddc5..90d0d09e033ea19b75aa788c83959a7ad5833bfc 100644
--- a/test/arf_freq_test.cc
+++ b/test/arf_freq_test.cc
@@ -49,9 +49,9 @@ const TestVideoParam kTestVectors[] = {
   { "hantro_collage_w352h288.yuv", 352, 288, 30, 1, 8, VPX_IMG_FMT_I420,
     VPX_BITS_8, 0 },
   { "rush_hour_444.y4m", 352, 288, 30, 1, 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // Add list of profile 2/3 test videos here ...
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 
 const TestEncodeParam kEncodeVectors[] = {
@@ -62,7 +62,7 @@ const TestEncodeParam kEncodeVectors[] = {
 
 const int kMinArfVectors[] = {
   // NOTE: 0 refers to the default built-in logic in:
-  //       vp10_rc_get_default_min_gf_interval(...)
+  //       av1_rc_get_default_min_gf_interval(...)
   0, 4, 8, 12, 15
 };
 
@@ -164,7 +164,7 @@ class ArfFreqTest
     if (min_arf_requested_)
       return min_arf_requested_;
     else
-      return vp10_rc_get_default_min_gf_interval(
+      return av1_rc_get_default_min_gf_interval(
           test_video_param_.width, test_video_param_.height,
           (double)test_video_param_.framerate_num /
               test_video_param_.framerate_den);
@@ -210,20 +210,20 @@ TEST_P(ArfFreqTest, MinArfFreqTest) {
   delete (video);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1_ENCODER
 // TODO(angiebird): 25-29 fail in high bitdepth mode.
 INSTANTIATE_TEST_CASE_P(
-    DISABLED_VP10, ArfFreqTest,
+    DISABLED_AV1, ArfFreqTest,
     ::testing::Combine(
         ::testing::Values(static_cast<const libaom_test::CodecFactory *>(
-            &libaom_test::kVP10)),
+            &libaom_test::kAV1)),
         ::testing::ValuesIn(kTestVectors), ::testing::ValuesIn(kEncodeVectors),
         ::testing::ValuesIn(kMinArfVectors)));
-#endif  // CONFIG_VP10_ENCODER
+#endif  // CONFIG_AV1_ENCODER
 #else
-VP10_INSTANTIATE_TEST_CASE(ArfFreqTest, ::testing::ValuesIn(kTestVectors),
+AV1_INSTANTIATE_TEST_CASE(ArfFreqTest, ::testing::ValuesIn(kTestVectors),
                            ::testing::ValuesIn(kEncodeVectors),
                            ::testing::ValuesIn(kMinArfVectors));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/av1_dct_test.cc b/test/av1_dct_test.cc
index 6ab6a900ac04ff77855b6c9060c4986e608954b5..fa122c73dced343a1f08cee42aaf8229340d0ed3 100644
--- a/test/av1_dct_test.cc
+++ b/test/av1_dct_test.cc
@@ -84,7 +84,7 @@ class TransTestBase {
 };
 
 typedef std::tr1::tuple<FdctFunc, FdctFuncRef, int, int> FdctParam;
-class Vp10FwdTxfm : public TransTestBase,
+class AV1FwdTxfm : public TransTestBase,
                     public ::testing::TestWithParam<FdctParam> {
  public:
   virtual void SetUp() {
@@ -96,10 +96,10 @@ class Vp10FwdTxfm : public TransTestBase,
   virtual void TearDown() {}
 };
 
-TEST_P(Vp10FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
+TEST_P(AV1FwdTxfm, RunFwdAccuracyCheck) { RunFwdAccuracyCheck(); }
 
 INSTANTIATE_TEST_CASE_P(
-    C, Vp10FwdTxfm,
+    C, AV1FwdTxfm,
     ::testing::Values(FdctParam(&fdct4, &reference_dct_1d, 4, 1),
                       FdctParam(&fdct8, &reference_dct_1d, 8, 1),
                       FdctParam(&fdct16, &reference_dct_1d, 16, 2)));
diff --git a/test/av1_inv_txfm_test.cc b/test/av1_inv_txfm_test.cc
index a2731c7f3ed500ceba03af9f3a6ee4b46a2016f6..64d77719bbfbdaf442055064b2e01f595b5e4e00 100644
--- a/test/av1_inv_txfm_test.cc
+++ b/test/av1_inv_txfm_test.cc
@@ -88,7 +88,7 @@ class TransTestBase {
 };
 
 typedef std::tr1::tuple<IdctFunc, IdctFuncRef, int, int> IdctParam;
-class Vp10InvTxfm : public TransTestBase,
+class AV1InvTxfm : public TransTestBase,
                     public ::testing::TestWithParam<IdctParam> {
  public:
   virtual void SetUp() {
@@ -100,24 +100,24 @@ class Vp10InvTxfm : public TransTestBase,
   virtual void TearDown() {}
 };
 
-TEST_P(Vp10InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
+TEST_P(AV1InvTxfm, RunInvAccuracyCheck) { RunInvAccuracyCheck(); }
 
 INSTANTIATE_TEST_CASE_P(
-    C, Vp10InvTxfm,
-    ::testing::Values(IdctParam(&vp10_idct4_c, &reference_idct_1d, 4, 1),
-                      IdctParam(&vp10_idct8_c, &reference_idct_1d, 8, 2),
-                      IdctParam(&vp10_idct16_c, &reference_idct_1d, 16, 4),
-                      IdctParam(&vp10_idct32_c, &reference_idct_1d, 32, 6)));
+    C, AV1InvTxfm,
+    ::testing::Values(IdctParam(&av1_idct4_c, &reference_idct_1d, 4, 1),
+                      IdctParam(&av1_idct8_c, &reference_idct_1d, 8, 2),
+                      IdctParam(&av1_idct16_c, &reference_idct_1d, 16, 4),
+                      IdctParam(&av1_idct32_c, &reference_idct_1d, 32, 6)));
 
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
 typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
     PartialInvTxfmParam;
 const int kMaxNumCoeffs = 1024;
-class Vp10PartialIDctTest
+class AV1PartialIDctTest
     : public ::testing::TestWithParam<PartialInvTxfmParam> {
  public:
-  virtual ~Vp10PartialIDctTest() {}
+  virtual ~AV1PartialIDctTest() {}
   virtual void SetUp() {
     ftxfm_ = GET_PARAM(0);
     full_itxfm_ = GET_PARAM(1);
@@ -136,7 +136,7 @@ class Vp10PartialIDctTest
   InvTxfmFunc partial_itxfm_;
 };
 
-TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
+TEST_P(AV1PartialIDctTest, RunQuantCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
@@ -184,7 +184,7 @@ TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
       // quantization with maximum allowed step sizes
       test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
       for (int j = 1; j < last_nonzero_; ++j)
-        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+        test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] =
             (output_ref_block[j] / 1828) * 1828;
     }
 
@@ -202,7 +202,7 @@ TEST_P(Vp10PartialIDctTest, RunQuantCheck) {
       << "Error: partial inverse transform produces different results";
 }
 
-TEST_P(Vp10PartialIDctTest, ResultsMatch) {
+TEST_P(AV1PartialIDctTest, ResultsMatch) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   int size;
   switch (tx_size_) {
@@ -235,7 +235,7 @@ TEST_P(Vp10PartialIDctTest, ResultsMatch) {
         max_energy_leftover = 0;
         coef = 0;
       }
-      test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] = coef;
+      test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] = coef;
     }
 
     memcpy(test_coef_block2, test_coef_block1,
@@ -257,19 +257,19 @@ TEST_P(Vp10PartialIDctTest, ResultsMatch) {
 using std::tr1::make_tuple;
 
 INSTANTIATE_TEST_CASE_P(
-    C, Vp10PartialIDctTest,
-    ::testing::Values(make_tuple(&aom_fdct32x32_c, &vp10_idct32x32_1024_add_c,
-                                 &vp10_idct32x32_34_add_c, TX_32X32, 34),
-                      make_tuple(&aom_fdct32x32_c, &vp10_idct32x32_1024_add_c,
-                                 &vp10_idct32x32_1_add_c, TX_32X32, 1),
-                      make_tuple(&aom_fdct16x16_c, &vp10_idct16x16_256_add_c,
-                                 &vp10_idct16x16_10_add_c, TX_16X16, 10),
-                      make_tuple(&aom_fdct16x16_c, &vp10_idct16x16_256_add_c,
-                                 &vp10_idct16x16_1_add_c, TX_16X16, 1),
-                      make_tuple(&aom_fdct8x8_c, &vp10_idct8x8_64_add_c,
-                                 &vp10_idct8x8_12_add_c, TX_8X8, 12),
-                      make_tuple(&aom_fdct8x8_c, &vp10_idct8x8_64_add_c,
-                                 &vp10_idct8x8_1_add_c, TX_8X8, 1),
-                      make_tuple(&aom_fdct4x4_c, &vp10_idct4x4_16_add_c,
-                                 &vp10_idct4x4_1_add_c, TX_4X4, 1)));
+    C, AV1PartialIDctTest,
+    ::testing::Values(make_tuple(&aom_fdct32x32_c, &av1_idct32x32_1024_add_c,
+                                 &av1_idct32x32_34_add_c, TX_32X32, 34),
+                      make_tuple(&aom_fdct32x32_c, &av1_idct32x32_1024_add_c,
+                                 &av1_idct32x32_1_add_c, TX_32X32, 1),
+                      make_tuple(&aom_fdct16x16_c, &av1_idct16x16_256_add_c,
+                                 &av1_idct16x16_10_add_c, TX_16X16, 10),
+                      make_tuple(&aom_fdct16x16_c, &av1_idct16x16_256_add_c,
+                                 &av1_idct16x16_1_add_c, TX_16X16, 1),
+                      make_tuple(&aom_fdct8x8_c, &av1_idct8x8_64_add_c,
+                                 &av1_idct8x8_12_add_c, TX_8X8, 12),
+                      make_tuple(&aom_fdct8x8_c, &av1_idct8x8_64_add_c,
+                                 &av1_idct8x8_1_add_c, TX_8X8, 1),
+                      make_tuple(&aom_fdct4x4_c, &av1_idct4x4_16_add_c,
+                                 &av1_idct4x4_1_add_c, TX_4X4, 1)));
 }  // namespace
diff --git a/test/boolcoder_test.cc b/test/boolcoder_test.cc
index 18d5c02125f65636a9ed9137f8dd02604e59d42d..b376a09a6f4eb4c322523dfba8450658effa897a 100644
--- a/test/boolcoder_test.cc
+++ b/test/boolcoder_test.cc
@@ -25,7 +25,7 @@ namespace {
 const int num_tests = 10;
 }  // namespace
 
-TEST(VP10, TestBitIO) {
+TEST(AV1, TestBitIO) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   for (int n = 0; n < num_tests; ++n) {
     for (int method = 0; method <= 7; ++method) {  // we generate various proba
diff --git a/test/borders_test.cc b/test/borders_test.cc
index 7fad160afe3a3e6ff196e55d3e850e21d03b782e..95c87fb70a859230f8074f930742cfef4a4eb2e1 100644
--- a/test/borders_test.cc
+++ b/test/borders_test.cc
@@ -79,6 +79,6 @@ TEST_P(BordersTest, TestLowBitrate) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(BordersTest,
+AV1_INSTANTIATE_TEST_CASE(BordersTest,
                            ::testing::Values(::libaom_test::kTwoPassGood));
 }  // namespace
diff --git a/test/codec_factory.h b/test/codec_factory.h
index db65ac35e2045aafd5b35114fda8c7f8b2b895da..3306ce7d6782e79f367b2bf897025ff2d04d5f51 100644
--- a/test/codec_factory.h
+++ b/test/codec_factory.h
@@ -14,10 +14,10 @@
 #include "./aom_config.h"
 #include "aom/aom_decoder.h"
 #include "aom/aom_encoder.h"
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 #include "aom/vp8cx.h"
 #endif
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
 #include "aom/vp8dx.h"
 #endif
 
@@ -70,47 +70,47 @@ class CodecTestWith3Params
           std::tr1::tuple<const libaom_test::CodecFactory*, T1, T2, T3> > {};
 
 /*
- * VP10 Codec Definitions
+ * AV1 Codec Definitions
  */
-#if CONFIG_VP10
-class VP10Decoder : public Decoder {
+#if CONFIG_AV1
+class AV1Decoder : public Decoder {
  public:
-  VP10Decoder(aom_codec_dec_cfg_t cfg, unsigned long deadline)
+  AV1Decoder(aom_codec_dec_cfg_t cfg, unsigned long deadline)
       : Decoder(cfg, deadline) {}
 
-  VP10Decoder(aom_codec_dec_cfg_t cfg, const aom_codec_flags_t flag,
+  AV1Decoder(aom_codec_dec_cfg_t cfg, const aom_codec_flags_t flag,
               unsigned long deadline)  // NOLINT
       : Decoder(cfg, flag, deadline) {}
 
  protected:
   virtual aom_codec_iface_t* CodecInterface() const {
-#if CONFIG_VP10_DECODER
-    return &aom_codec_vp10_dx_algo;
+#if CONFIG_AV1_DECODER
+    return &aom_codec_av1_dx_algo;
 #else
     return NULL;
 #endif
   }
 };
 
-class VP10Encoder : public Encoder {
+class AV1Encoder : public Encoder {
  public:
-  VP10Encoder(aom_codec_enc_cfg_t cfg, unsigned long deadline,
+  AV1Encoder(aom_codec_enc_cfg_t cfg, unsigned long deadline,
               const unsigned long init_flags, TwopassStatsStore* stats)
       : Encoder(cfg, deadline, init_flags, stats) {}
 
  protected:
   virtual aom_codec_iface_t* CodecInterface() const {
-#if CONFIG_VP10_ENCODER
-    return &aom_codec_vp10_cx_algo;
+#if CONFIG_AV1_ENCODER
+    return &aom_codec_av1_cx_algo;
 #else
     return NULL;
 #endif
   }
 };
 
-class VP10CodecFactory : public CodecFactory {
+class AV1CodecFactory : public CodecFactory {
  public:
-  VP10CodecFactory() : CodecFactory() {}
+  AV1CodecFactory() : CodecFactory() {}
 
   virtual Decoder* CreateDecoder(aom_codec_dec_cfg_t cfg,
                                  unsigned long deadline) const {
@@ -120,8 +120,8 @@ class VP10CodecFactory : public CodecFactory {
   virtual Decoder* CreateDecoder(aom_codec_dec_cfg_t cfg,
                                  const aom_codec_flags_t flags,
                                  unsigned long deadline) const {  // NOLINT
-#if CONFIG_VP10_DECODER
-    return new VP10Decoder(cfg, flags, deadline);
+#if CONFIG_AV1_DECODER
+    return new AV1Decoder(cfg, flags, deadline);
 #else
     return NULL;
 #endif
@@ -131,8 +131,8 @@ class VP10CodecFactory : public CodecFactory {
                                  unsigned long deadline,
                                  const unsigned long init_flags,
                                  TwopassStatsStore* stats) const {
-#if CONFIG_VP10_ENCODER
-    return new VP10Encoder(cfg, deadline, init_flags, stats);
+#if CONFIG_AV1_ENCODER
+    return new AV1Encoder(cfg, deadline, init_flags, stats);
 #else
     return NULL;
 #endif
@@ -140,26 +140,26 @@ class VP10CodecFactory : public CodecFactory {
 
   virtual aom_codec_err_t DefaultEncoderConfig(aom_codec_enc_cfg_t* cfg,
                                                int usage) const {
-#if CONFIG_VP10_ENCODER
-    return aom_codec_enc_config_default(&aom_codec_vp10_cx_algo, cfg, usage);
+#if CONFIG_AV1_ENCODER
+    return aom_codec_enc_config_default(&aom_codec_av1_cx_algo, cfg, usage);
 #else
     return VPX_CODEC_INCAPABLE;
 #endif
   }
 };
 
-const libaom_test::VP10CodecFactory kVP10;
+const libaom_test::AV1CodecFactory kAV1;
 
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)                              \
+#define AV1_INSTANTIATE_TEST_CASE(test, ...)                              \
   INSTANTIATE_TEST_CASE_P(                                                 \
-      VP10, test,                                                          \
+      AV1, test,                                                          \
       ::testing::Combine(                                                  \
           ::testing::Values(static_cast<const libaom_test::CodecFactory*>( \
-              &libaom_test::kVP10)),                                       \
+              &libaom_test::kAV1)),                                       \
           __VA_ARGS__))
 #else
-#define VP10_INSTANTIATE_TEST_CASE(test, ...)
-#endif  // CONFIG_VP10
+#define AV1_INSTANTIATE_TEST_CASE(test, ...)
+#endif  // CONFIG_AV1
 
 }  // namespace libaom_test
 #endif  // TEST_CODEC_FACTORY_H_
diff --git a/test/convolve_test.cc b/test/convolve_test.cc
index 004e2e2fc3802ffe00a97ccd8e474c8d261aa3da..3a735b22540fcb1d8ce367c29383a3e0f8f35402 100644
--- a/test/convolve_test.cc
+++ b/test/convolve_test.cc
@@ -165,7 +165,7 @@ void filter_average_block2d_8_c(const uint8_t *src_ptr,
   block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width, output_height);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
                                const unsigned int src_stride,
                                const int16_t *HFilter, const int16_t *VFilter,
@@ -266,7 +266,7 @@ void highbd_filter_average_block2d_8_c(
   highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width,
                            output_height, bd);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
  public:
@@ -279,7 +279,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
         aom_memalign(kDataAlignment, kOutputBufferSize));
     output_ref_ = reinterpret_cast<uint8_t *>(
         aom_memalign(kDataAlignment, kOutputBufferSize));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     input16_ = reinterpret_cast<uint16_t *>(aom_memalign(
                    kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) +
                1;
@@ -299,7 +299,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
     output_ = NULL;
     aom_free(output_ref_);
     output_ref_ = NULL;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     aom_free(input16_ - 1);
     input16_ = NULL;
     aom_free(output16_);
@@ -334,7 +334,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
 
   virtual void SetUp() {
     UUT_ = GET_PARAM(2);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ != 0)
       mask_ = (1 << UUT_->use_highbd_) - 1;
     else
@@ -352,12 +352,12 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
     for (int i = 0; i < kInputBufferSize; ++i) {
       if (i & 1) {
         input_[i] = 255;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         input16_[i] = mask_;
 #endif
       } else {
         input_[i] = prng.Rand8Extremes();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         input16_[i] = prng.Rand16() & mask_;
 #endif
       }
@@ -366,14 +366,14 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
 
   void SetConstantInput(int value) {
     memset(input_, value, kInputBufferSize);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     aom_memset16(input16_, value, kInputBufferSize);
 #endif
   }
 
   void CopyOutputToRef() {
     memcpy(output_ref_, output_, kOutputBufferSize);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     memcpy(output16_ref_, output16_, kOutputBufferSize);
 #endif
   }
@@ -385,7 +385,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   }
 
   uint8_t *input() const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return input_ + BorderTop() * kOuterBlockSize + BorderLeft();
     } else {
@@ -398,7 +398,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   }
 
   uint8_t *output() const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return output_ + BorderTop() * kOuterBlockSize + BorderLeft();
     } else {
@@ -411,7 +411,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   }
 
   uint8_t *output_ref() const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return output_ref_ + BorderTop() * kOuterBlockSize + BorderLeft();
     } else {
@@ -424,7 +424,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   }
 
   uint16_t lookup(uint8_t *list, int index) const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       return list[index];
     } else {
@@ -436,7 +436,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   }
 
   void assign_val(uint8_t *list, int index, uint16_t val) const {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       list[index] = (uint8_t)val;
     } else {
@@ -452,7 +452,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
       const int16_t *HFilter, const int16_t *VFilter, uint8_t *dst_ptr,
       unsigned int dst_stride, unsigned int output_width,
       unsigned int output_height) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       filter_average_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
                                  dst_stride, output_width, output_height);
@@ -475,7 +475,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
                                   unsigned int dst_stride,
                                   unsigned int output_width,
                                   unsigned int output_height) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (UUT_->use_highbd_ == 0) {
       filter_block2d_8_c(src_ptr, src_stride, HFilter, VFilter, dst_ptr,
                          dst_stride, output_width, output_height);
@@ -495,7 +495,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
   static uint8_t *input_;
   static uint8_t *output_;
   static uint8_t *output_ref_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   static uint16_t *input16_;
   static uint16_t *output16_;
   static uint16_t *output16_ref_;
@@ -506,7 +506,7 @@ class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
 uint8_t *ConvolveTest::input_ = NULL;
 uint8_t *ConvolveTest::output_ = NULL;
 uint8_t *ConvolveTest::output_ref_ = NULL;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 uint16_t *ConvolveTest::input16_ = NULL;
 uint16_t *ConvolveTest::output16_ = NULL;
 uint16_t *ConvolveTest::output16_ref_ = NULL;
@@ -613,7 +613,7 @@ const int kNumFilters = 16;
 TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
   for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
     const InterpKernel *filters =
-        vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+        av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
     for (int i = 0; i < kNumFilters; i++) {
       const int p0 = filters[i][0] + filters[i][1];
       const int p1 = filters[i][2] + filters[i][3];
@@ -636,7 +636,7 @@ const int16_t kInvalidFilter[8] = { 0 };
 TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
   uint8_t *ref;
@@ -651,7 +651,7 @@ TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
 
   for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
     const InterpKernel *filters =
-        vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+        av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
 
     for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
       for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
@@ -693,7 +693,7 @@ TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
 TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
   uint8_t *ref;
@@ -711,7 +711,7 @@ TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
   for (int y = 0; y < Height(); ++y) {
     for (int x = 0; x < Width(); ++x) {
       uint16_t r;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
         r = prng.Rand8Extremes();
       } else {
@@ -728,7 +728,7 @@ TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
 
   for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
     const InterpKernel *filters =
-        vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+        av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
 
     for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
       for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
@@ -770,7 +770,7 @@ TEST_P(ConvolveTest, MatchesReferenceAveragingSubpixelFilter) {
 TEST_P(ConvolveTest, FilterExtremes) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   uint8_t ref8[kOutputStride * kMaxDimension];
   uint16_t ref16[kOutputStride * kMaxDimension];
   uint8_t *ref;
@@ -788,7 +788,7 @@ TEST_P(ConvolveTest, FilterExtremes) {
   for (int y = 0; y < Height(); ++y) {
     for (int x = 0; x < Width(); ++x) {
       uint16_t r;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
         r = prng.Rand8Extremes();
       } else {
@@ -807,7 +807,7 @@ TEST_P(ConvolveTest, FilterExtremes) {
     while (seed_val < 256) {
       for (int y = 0; y < 8; ++y) {
         for (int x = 0; x < 8; ++x) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
           assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
                      ((seed_val >> (axis ? y : x)) & 1) * mask_);
 #else
@@ -825,7 +825,7 @@ TEST_P(ConvolveTest, FilterExtremes) {
 
       for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
         const InterpKernel *filters =
-            vp10_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
+            av1_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
         for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
           for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
             wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
@@ -867,7 +867,7 @@ TEST_P(ConvolveTest, FilterExtremes) {
 TEST_P(ConvolveTest, CheckScalingFiltering) {
   uint8_t *const in = input();
   uint8_t *const out = output();
-  const InterpKernel *const eighttap = vp10_filter_kernels[EIGHTTAP];
+  const InterpKernel *const eighttap = av1_filter_kernels[EIGHTTAP];
 
   SetConstantInput(127);
 
@@ -894,7 +894,7 @@ TEST_P(ConvolveTest, CheckScalingFiltering) {
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #define WRAP(func, bd)                                                       \
   void wrap_##func##_##bd(                                                   \
       const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst,                \
@@ -1032,7 +1032,7 @@ INSTANTIATE_TEST_CASE_P(
 #endif
 
 #if HAVE_SSE2 && ARCH_X86_64
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 const ConvolveFunctions convolve8_sse2(
 #if CONFIG_USE_X86INC
     wrap_convolve_copy_sse2_8, wrap_convolve_avg_sse2_8,
@@ -1130,7 +1130,7 @@ INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest,
                                           make_tuple(64, 32, &convolve8_sse2),
                                           make_tuple(32, 64, &convolve8_sse2),
                                           make_tuple(64, 64, &convolve8_sse2)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif
 
 #if HAVE_SSSE3
diff --git a/test/cpu_speed_test.cc b/test/cpu_speed_test.cc
index a5339b7687fb52be59238eb122b515991f72dd42..71bb4522d271e803f5dd0edd3953d7513dbb088a 100644
--- a/test/cpu_speed_test.cc
+++ b/test/cpu_speed_test.cc
@@ -130,7 +130,7 @@ TEST_P(CpuSpeedTest, TestLowBitrate) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(CpuSpeedTest,
+AV1_INSTANTIATE_TEST_CASE(CpuSpeedTest,
                            ::testing::Values(::libaom_test::kTwoPassGood,
                                              ::libaom_test::kOnePassGood),
                            ::testing::Range(0, 3));
diff --git a/test/datarate_test.cc b/test/datarate_test.cc
index 2aa7a049533289c23e1d6a1aa52645daf79ae54d..7e1cbba61e8b1d9274618ec220b0c5229be3fa05 100644
--- a/test/datarate_test.cc
+++ b/test/datarate_test.cc
@@ -545,7 +545,7 @@ TEST_P(DatarateTestVP9Large, ChangingDropFrameThresh) {
 }
 
 // Check basic rate targeting for 2 temporal layers.
-#if 0  // VP10 does not support multiple layers yet
+#if 0  // AV1 does not support multiple layers yet
 TEST_P(DatarateTestVP9Large, BasicRateTargeting2TemporalLayers) {
   cfg_.rc_buf_initial_sz = 500;
   cfg_.rc_buf_optimal_sz = 500;
@@ -894,13 +894,13 @@ TEST_P(DatarateOnePassCbrSvc, OnePassCbrSvc4threads) {
   EXPECT_EQ(GetMismatchFrames(), (unsigned int)0);
 }
 
-VP10_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
+AV1_INSTANTIATE_TEST_CASE(DatarateTestVP9Large,
                            ::testing::Values(::libaom_test::kOnePassGood,
                                              ::libaom_test::kRealTime),
                            ::testing::Range(2, 7));
 
-/* VP10 does not support multiple layers yet.
-VP10_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
+/* AV1 does not support multiple layers yet.
+AV1_INSTANTIATE_TEST_CASE(DatarateOnePassCbrSvc,
                            ::testing::Values(::libaom_test::kRealTime),
                            ::testing::Range(5, 8));
                            */
diff --git a/test/dct16x16_test.cc b/test/dct16x16_test.cc
index e662592c5efab3365e5464b1b3f650f3f67e352c..0feb80f80f14fd96afbd0c3ba6fb67b2a8a12562 100644
--- a/test/dct16x16_test.cc
+++ b/test/dct16x16_test.cc
@@ -253,15 +253,15 @@ void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
 }
 
 void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht16x16_c(in, out, stride, tx_type);
+  av1_fht16x16_c(in, out, stride, tx_type);
 }
 
 void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
                   int tx_type) {
-  vp10_iht16x16_256_add_c(in, dest, stride, tx_type);
+  av1_iht16x16_256_add_c(in, dest, stride, tx_type);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct16x16_256_add_c(in, out, stride, 10);
 }
@@ -281,11 +281,11 @@ void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
 }
 
 void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
+  av1_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
 }
 
 void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
+  av1_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
 }
 
 void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
@@ -313,7 +313,7 @@ void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
 }
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class Trans16x16TestBase {
  public:
@@ -334,7 +334,7 @@ class Trans16x16TestBase {
       DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -345,7 +345,7 @@ class Trans16x16TestBase {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -358,7 +358,7 @@ class Trans16x16TestBase {
           RunFwdTxfm(test_input_block, test_temp_block, pitch_));
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -366,7 +366,7 @@ class Trans16x16TestBase {
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -445,7 +445,7 @@ class Trans16x16TestBase {
 
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
 #endif
@@ -465,7 +465,7 @@ class Trans16x16TestBase {
       // clear reconstructed pixel buffers
       memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
       memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
       memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
 #endif
@@ -477,7 +477,7 @@ class Trans16x16TestBase {
       if (bit_depth_ == VPX_BITS_8) {
         inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
                      tx_type_);
@@ -487,7 +487,7 @@ class Trans16x16TestBase {
       }
       if (bit_depth_ == VPX_BITS_8) {
         for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref[j], dst[j]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         for (int j = 0; j < kNumCoeffs; ++j) EXPECT_EQ(ref16[j], dst16[j]);
 #endif
@@ -502,10 +502,10 @@ class Trans16x16TestBase {
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     for (int i = 0; i < count_test_block; ++i) {
       double out_r[kNumCoeffs];
@@ -516,12 +516,12 @@ class Trans16x16TestBase {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
           in[j] = src16[j] - dst16[j];
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
 
@@ -531,20 +531,20 @@ class Trans16x16TestBase {
 
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), 16));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
         const uint32_t diff = dst[j] - src[j];
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         const uint32_t error = diff * diff;
         EXPECT_GE(1u, error) << "Error: 16x16 IDCT has error " << error
                              << " at index " << j;
@@ -556,14 +556,14 @@ class Trans16x16TestBase {
     ACMRandom rnd(ACMRandom::DeterministicSeed());
     const int count_test_block = 10000;
     const int eob = 10;
-    const int16_t *scan = vp10_default_scan_orders[TX_16X16].scan;
+    const int16_t *scan = av1_default_scan_orders[TX_16X16].scan;
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     for (int i = 0; i < count_test_block; ++i) {
       for (int j = 0; j < kNumCoeffs; ++j) {
@@ -576,31 +576,31 @@ class Trans16x16TestBase {
         if (bit_depth_ == VPX_BITS_8) {
           dst[j] = 0;
           ref[j] = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           dst16[j] = 0;
           ref16[j] = 0;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
       if (bit_depth_ == VPX_BITS_8) {
         ref_txfm(coeff, ref, pitch_);
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
       } else {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
 #else
         const uint32_t diff = dst[j] - ref[j];
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         const uint32_t error = diff * diff;
         EXPECT_EQ(0u, error) << "Error: 16x16 IDCT Comparison has error "
                              << error << " at index " << j;
@@ -630,7 +630,7 @@ class Trans16x16DCT : public Trans16x16TestBase,
     fwd_txfm_ref = fdct16x16_ref;
     inv_txfm_ref = idct16x16_ref;
     mask_ = (1 << bit_depth_) - 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     switch (bit_depth_) {
       case VPX_BITS_10: inv_txfm_ref = idct16x16_10_ref; break;
       case VPX_BITS_12: inv_txfm_ref = idct16x16_12_ref; break;
@@ -682,7 +682,7 @@ class Trans16x16HT : public Trans16x16TestBase,
     fwd_txfm_ref = fht16x16_ref;
     inv_txfm_ref = iht16x16_ref;
     mask_ = (1 << bit_depth_) - 1;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     switch (bit_depth_) {
       case VPX_BITS_10: inv_txfm_ref = iht16x16_10; break;
       case VPX_BITS_12: inv_txfm_ref = iht16x16_12; break;
@@ -750,7 +750,7 @@ TEST_P(InvTrans16x16DCT, CompareReference) {
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans16x16DCT,
     ::testing::Values(
@@ -762,59 +762,59 @@ INSTANTIATE_TEST_CASE_P(C, Trans16x16DCT,
                         ::testing::Values(make_tuple(&aom_fdct16x16_c,
                                                      &aom_idct16x16_256_add_c,
                                                      0, VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans16x16HT,
     ::testing::Values(
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, VPX_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, Trans16x16HT,
     ::testing::Values(
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht16x16_c, &vp10_iht16x16_256_add_c, 3, VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht16x16_c, &av1_iht16x16_256_add_c, 3, VPX_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans16x16DCT,
     ::testing::Values(make_tuple(&aom_fdct16x16_c, &aom_idct16x16_256_add_neon,
                                  0, VPX_BITS_8)));
 #endif
 
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16DCT,
     ::testing::Values(make_tuple(&aom_fdct16x16_sse2,
                                  &aom_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16HT,
-    ::testing::Values(make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 0, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 1, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 2, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2,
-                                 &vp10_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
-
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    ::testing::Values(make_tuple(&av1_fht16x16_sse2,
+                                 &av1_iht16x16_256_add_sse2, 0, VPX_BITS_8),
+                      make_tuple(&av1_fht16x16_sse2,
+                                 &av1_iht16x16_256_add_sse2, 1, VPX_BITS_8),
+                      make_tuple(&av1_fht16x16_sse2,
+                                 &av1_iht16x16_256_add_sse2, 2, VPX_BITS_8),
+                      make_tuple(&av1_fht16x16_sse2,
+                                 &av1_iht16x16_256_add_sse2, 3, VPX_BITS_8)));
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16DCT,
     ::testing::Values(
@@ -828,13 +828,13 @@ INSTANTIATE_TEST_CASE_P(
                    VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans16x16HT,
-    ::testing::Values(make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+    ::testing::Values(make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
                                  0, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                      make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
                                  1, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                      make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
                                  2, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_sse2, &vp10_iht16x16_256_add_c,
+                      make_tuple(&av1_fht16x16_sse2, &av1_iht16x16_256_add_c,
                                  3, VPX_BITS_8)));
 // Optimizations take effect at a threshold of 3155, so we use a value close to
 // that to test both branches.
@@ -848,22 +848,22 @@ INSTANTIATE_TEST_CASE_P(
                                  &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
                       make_tuple(&idct16x16_12, &idct16x16_256_add_12_sse2,
                                  3167, VPX_BITS_12)));
-#endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MSA, Trans16x16DCT,
                         ::testing::Values(make_tuple(&aom_fdct16x16_msa,
                                                      &aom_idct16x16_256_add_msa,
                                                      0, VPX_BITS_8)));
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans16x16HT,
-    ::testing::Values(make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+    ::testing::Values(make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
                                  0, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                      make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
                                  1, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                      make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
                                  2, VPX_BITS_8),
-                      make_tuple(&vp10_fht16x16_msa, &vp10_iht16x16_256_add_msa,
+                      make_tuple(&av1_fht16x16_msa, &av1_iht16x16_256_add_msa,
                                  3, VPX_BITS_8)));
-#endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/dct32x32_test.cc b/test/dct32x32_test.cc
index 466076b0268a26c97676180c3b08262bdeab7b35..92f3278f37a0330dee93c21b936f3149b237071b 100644
--- a/test/dct32x32_test.cc
+++ b/test/dct32x32_test.cc
@@ -75,7 +75,7 @@ typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
 typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, int, aom_bit_depth_t>
     Trans32x32Param;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct32x32_8(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct32x32_1024_add_c(in, out, stride, 8);
 }
@@ -87,7 +87,7 @@ void idct32x32_10(const tran_low_t *in, uint8_t *out, int stride) {
 void idct32x32_12(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct32x32_1024_add_c(in, out, stride, 12);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class Trans32x32Test : public ::testing::TestWithParam<Trans32x32Param> {
  public:
@@ -120,7 +120,7 @@ TEST_P(Trans32x32Test, AccuracyCheck) {
   DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -132,7 +132,7 @@ TEST_P(Trans32x32Test, AccuracyCheck) {
         src[j] = rnd.Rand8();
         dst[j] = rnd.Rand8();
         test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         src16[j] = rnd.Rand16() & mask_;
         dst16[j] = rnd.Rand16() & mask_;
@@ -144,7 +144,7 @@ TEST_P(Trans32x32Test, AccuracyCheck) {
     ASM_REGISTER_STATE_CHECK(fwd_txfm_(test_input_block, test_temp_block, 32));
     if (bit_depth_ == VPX_BITS_8) {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(test_temp_block, dst, 32));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       ASM_REGISTER_STATE_CHECK(
           inv_txfm_(test_temp_block, CONVERT_TO_BYTEPTR(dst16), 32));
@@ -152,7 +152,7 @@ TEST_P(Trans32x32Test, AccuracyCheck) {
     }
 
     for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       const uint32_t diff =
           bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -253,7 +253,7 @@ TEST_P(Trans32x32Test, InverseAccuracy) {
   DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -267,7 +267,7 @@ TEST_P(Trans32x32Test, InverseAccuracy) {
         src[j] = rnd.Rand8();
         dst[j] = rnd.Rand8();
         in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         src16[j] = rnd.Rand16() & mask_;
         dst16[j] = rnd.Rand16() & mask_;
@@ -281,13 +281,13 @@ TEST_P(Trans32x32Test, InverseAccuracy) {
       coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
     if (bit_depth_ == VPX_BITS_8) {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, dst, 32));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       ASM_REGISTER_STATE_CHECK(inv_txfm_(coeff, CONVERT_TO_BYTEPTR(dst16), 32));
 #endif
     }
     for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       const int diff =
           bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -302,7 +302,7 @@ TEST_P(Trans32x32Test, InverseAccuracy) {
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans32x32Test,
     ::testing::Values(
@@ -320,27 +320,27 @@ INSTANTIATE_TEST_CASE_P(
                                  VPX_BITS_8),
                       make_tuple(&aom_fdct32x32_rd_c, &aom_idct32x32_1024_add_c,
                                  1, VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans32x32Test,
     ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_neon,
                                  0, VPX_BITS_8),
                       make_tuple(&aom_fdct32x32_rd_c,
                                  &aom_idct32x32_1024_add_neon, 1, VPX_BITS_8)));
-#endif  // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans32x32Test,
     ::testing::Values(make_tuple(&aom_fdct32x32_sse2,
                                  &aom_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
                       make_tuple(&aom_fdct32x32_rd_sse2,
                                  &aom_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans32x32Test,
     ::testing::Values(
@@ -354,23 +354,23 @@ INSTANTIATE_TEST_CASE_P(
                    VPX_BITS_8),
         make_tuple(&aom_fdct32x32_rd_sse2, &aom_idct32x32_1024_add_c, 1,
                    VPX_BITS_8)));
-#endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_AVX2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_AVX2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     AVX2, Trans32x32Test,
     ::testing::Values(make_tuple(&aom_fdct32x32_avx2,
                                  &aom_idct32x32_1024_add_sse2, 0, VPX_BITS_8),
                       make_tuple(&aom_fdct32x32_rd_avx2,
                                  &aom_idct32x32_1024_add_sse2, 1, VPX_BITS_8)));
-#endif  // HAVE_AVX2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_AVX2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans32x32Test,
     ::testing::Values(make_tuple(&aom_fdct32x32_msa,
                                  &aom_idct32x32_1024_add_msa, 0, VPX_BITS_8),
                       make_tuple(&aom_fdct32x32_rd_msa,
                                  &aom_idct32x32_1024_add_msa, 1, VPX_BITS_8)));
-#endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/decode_api_test.cc b/test/decode_api_test.cc
index 1fc083c9b5aba8267fd4da934dd1b0813f871988..51e67aaed8c09b8f4696a1afe7325fd44213e449 100644
--- a/test/decode_api_test.cc
+++ b/test/decode_api_test.cc
@@ -21,8 +21,8 @@ namespace {
 
 TEST(DecodeAPI, InvalidParams) {
   static const aom_codec_iface_t *kCodecs[] = {
-#if CONFIG_VP10_DECODER
-    &aom_codec_vp10_dx_algo,
+#if CONFIG_AV1_DECODER
+    &aom_codec_av1_dx_algo,
 #endif
   };
   uint8_t buf[1] = { 0 };
diff --git a/test/decode_perf_test.cc b/test/decode_perf_test.cc
index 58dcb4cd6fb6b5016509cc69a852ebcd2d2d1157..bea1dec8d1c6f133814f2ce91b271fa982418d1a 100644
--- a/test/decode_perf_test.cc
+++ b/test/decode_perf_test.cc
@@ -255,6 +255,6 @@ TEST_P(VP9NewEncodeDecodePerfTest, PerfTest) {
   printf("}\n");
 }
 
-VP10_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
-                           ::testing::Values(::libaom_test::kTwoPassGood));
+AV1_INSTANTIATE_TEST_CASE(VP9NewEncodeDecodePerfTest,
+                          ::testing::Values(::libaom_test::kTwoPassGood));
 }  // namespace
diff --git a/test/encode_api_test.cc b/test/encode_api_test.cc
index e01646d1887a133c31a812d81e6fff03a01366a2..09ee91a74bd0515b9db1025b7bd51585c937ade1 100644
--- a/test/encode_api_test.cc
+++ b/test/encode_api_test.cc
@@ -26,8 +26,8 @@ TEST(EncodeAPI, InvalidParams) {
 #if CONFIG_VP9_ENCODER
     &aom_codec_vp9_cx_algo,
 #endif
-#if CONFIG_VP10_ENCODER
-    &aom_codec_vp10_cx_algo,
+#if CONFIG_AV1_ENCODER
+    &aom_codec_av1_cx_algo,
 #endif
   };
   uint8_t buf[1] = { 0 };
diff --git a/test/encode_perf_test.cc b/test/encode_perf_test.cc
index 9b4abeec388d859796fcf7cf998f4b0acd90c295..a78afee4b9905a1f532b8bc3e15802544ed62fbc 100644
--- a/test/encode_perf_test.cc
+++ b/test/encode_perf_test.cc
@@ -182,6 +182,6 @@ TEST_P(VP9EncodePerfTest, PerfTest) {
   }
 }
 
-VP10_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
-                           ::testing::Values(::libaom_test::kRealTime));
+AV1_INSTANTIATE_TEST_CASE(VP9EncodePerfTest,
+                          ::testing::Values(::libaom_test::kRealTime));
 }  // namespace
diff --git a/test/encode_test_driver.cc b/test/encode_test_driver.cc
index 1d658e14f873da5ebb288caa23cbf947ec03dc94..c6d42aae4648a1642529d87e47a2785df2c6aea9 100644
--- a/test/encode_test_driver.cc
+++ b/test/encode_test_driver.cc
@@ -33,9 +33,9 @@ void Encoder::InitEncoder(VideoSource *video) {
     res = aom_codec_enc_init(&encoder_, CodecInterface(), &cfg_, init_flags_);
     ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
 
-#if CONFIG_VP10_ENCODER
-    if (CodecInterface() == &aom_codec_vp10_cx_algo) {
-      // Default to 1 tile column for VP10.
+#if CONFIG_AV1_ENCODER
+    if (CodecInterface() == &aom_codec_av1_cx_algo) {
+      // Default to 1 tile column for AV1.
       const int log2_tile_columns = 0;
       res = aom_codec_control_(&encoder_, VP9E_SET_TILE_COLUMNS,
                                log2_tile_columns);
diff --git a/test/encode_test_driver.h b/test/encode_test_driver.h
index e8d2e9dc5728a6f5323c2fa1a12b04f5ed69be01..a60d6df740e6abaffafd234faa171bf687cb0ad9 100644
--- a/test/encode_test_driver.h
+++ b/test/encode_test_driver.h
@@ -17,7 +17,7 @@
 #include "third_party/googletest/src/include/gtest/gtest.h"
 
 #include "./aom_config.h"
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 #include "aom/vp8cx.h"
 #endif
 #include "aom/aom_encoder.h"
@@ -133,7 +133,7 @@ class Encoder {
     const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
     ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
   }
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
   void Control(int ctrl_id, aom_active_map_t *arg) {
     const aom_codec_err_t res = aom_codec_control_(&encoder_, ctrl_id, arg);
     ASSERT_EQ(VPX_CODEC_OK, res) << EncoderError();
diff --git a/test/encoder_parms_get_to_decoder.cc b/test/encoder_parms_get_to_decoder.cc
index 52d15a5be72845739b5e370495e4d414253e054b..602bbcaeb569ff1202a02c3bab7cd8f35d36ace7 100644
--- a/test/encoder_parms_get_to_decoder.cc
+++ b/test/encoder_parms_get_to_decoder.cc
@@ -100,7 +100,7 @@ class VpxEncoderParmsGetToDecoder
         reinterpret_cast<aom_codec_alg_priv_t *>(vp9_decoder->priv);
     FrameWorkerData *const worker_data =
         reinterpret_cast<FrameWorkerData *>(priv->frame_workers[0].data1);
-    VP10_COMMON *const common = &worker_data->pbi->common;
+    AV1_COMMON *const common = &worker_data->pbi->common;
 
     if (encode_parms.lossless) {
       EXPECT_EQ(0, common->base_qindex);
@@ -143,7 +143,7 @@ TEST_P(VpxEncoderParmsGetToDecoder, BitstreamParms) {
   delete video;
 }
 
-VP10_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
-                           ::testing::ValuesIn(kVP9EncodeParameterSet),
-                           ::testing::ValuesIn(kVP9EncodePerfTestVectors));
+AV1_INSTANTIATE_TEST_CASE(VpxEncoderParmsGetToDecoder,
+                          ::testing::ValuesIn(kVP9EncodeParameterSet),
+                          ::testing::ValuesIn(kVP9EncodePerfTestVectors));
 }  // namespace
diff --git a/test/end_to_end_test.cc b/test/end_to_end_test.cc
index 1617f4ade8f66d0a39ed6f285eb62ec0d36738e6..cae8c9a25e17ea4a6d6870ec81d11d7a8ea88358 100644
--- a/test/end_to_end_test.cc
+++ b/test/end_to_end_test.cc
@@ -44,7 +44,7 @@ const TestVideoParam kTestVectors[] = {
   { "park_joy_90p_8_422.y4m", 8, VPX_IMG_FMT_I422, VPX_BITS_8, 1 },
   { "park_joy_90p_8_444.y4m", 8, VPX_IMG_FMT_I444, VPX_BITS_8, 1 },
   { "park_joy_90p_8_440.yuv", 8, VPX_IMG_FMT_I440, VPX_BITS_8, 1 },
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   { "park_joy_90p_10_420.y4m", 10, VPX_IMG_FMT_I42016, VPX_BITS_10, 2 },
   { "park_joy_90p_10_422.y4m", 10, VPX_IMG_FMT_I42216, VPX_BITS_10, 3 },
   { "park_joy_90p_10_444.y4m", 10, VPX_IMG_FMT_I44416, VPX_BITS_10, 3 },
@@ -53,7 +53,7 @@ const TestVideoParam kTestVectors[] = {
   { "park_joy_90p_12_422.y4m", 12, VPX_IMG_FMT_I42216, VPX_BITS_12, 3 },
   { "park_joy_90p_12_444.y4m", 12, VPX_IMG_FMT_I44416, VPX_BITS_12, 3 },
   { "park_joy_90p_12_440.yuv", 12, VPX_IMG_FMT_I44016, VPX_BITS_12, 3 },
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 
 // Encoding modes tested
@@ -169,22 +169,22 @@ TEST_P(EndToEndTestLarge, EndtoEndPSNRTest) {
   delete (video);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AOM_HIGHBITDEPTH
+#if CONFIG_AV1_ENCODER
 // TODO(angiebird): many fail in high bitdepth mode.
 INSTANTIATE_TEST_CASE_P(
-    DISABLED_VP10, EndToEndTestLarge,
+    DISABLED_AV1, EndToEndTestLarge,
     ::testing::Combine(
         ::testing::Values(static_cast<const libaom_test::CodecFactory *>(
-            &libaom_test::kVP10)),
+            &libaom_test::kAV1)),
         ::testing::ValuesIn(kEncodingModeVectors),
         ::testing::ValuesIn(kTestVectors),
         ::testing::ValuesIn(kCpuUsedVectors)));
-#endif  // CONFIG_VP10_ENCODER
+#endif  // CONFIG_AV1_ENCODER
 #else
-VP10_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
-                           ::testing::ValuesIn(kEncodingModeVectors),
-                           ::testing::ValuesIn(kTestVectors),
-                           ::testing::ValuesIn(kCpuUsedVectors));
+AV1_INSTANTIATE_TEST_CASE(EndToEndTestLarge,
+                          ::testing::ValuesIn(kEncodingModeVectors),
+                          ::testing::ValuesIn(kTestVectors),
+                          ::testing::ValuesIn(kCpuUsedVectors));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/error_block_test.cc b/test/error_block_test.cc
index 9eb51ae900063ddc1727f80a3cc1d75184c3f478..7925ae8b8b3fd35283687f1f0946212483b4c085 100644
--- a/test/error_block_test.cc
+++ b/test/error_block_test.cc
@@ -27,7 +27,7 @@
 using libaom_test::ACMRandom;
 
 namespace {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 const int kNumIterations = 1000;
 
 typedef int64_t (*ErrorBlockFunc)(const tran_low_t *coeff,
@@ -158,51 +158,51 @@ TEST_P(ErrorBlockTest, ExtremeValues) {
 using std::tr1::make_tuple;
 
 #if CONFIG_USE_X86INC
-int64_t wrap_vp10_highbd_block_error_8bit_c(const tran_low_t *coeff,
+int64_t wrap_av1_highbd_block_error_8bit_c(const tran_low_t *coeff,
                                             const tran_low_t *dqcoeff,
                                             intptr_t block_size, int64_t *ssz,
                                             int bps) {
   assert(bps == 8);
-  return vp10_highbd_block_error_8bit_c(coeff, dqcoeff, block_size, ssz);
+  return av1_highbd_block_error_8bit_c(coeff, dqcoeff, block_size, ssz);
 }
 
 #if HAVE_SSE2
-int64_t wrap_vp10_highbd_block_error_8bit_sse2(const tran_low_t *coeff,
+int64_t wrap_av1_highbd_block_error_8bit_sse2(const tran_low_t *coeff,
                                                const tran_low_t *dqcoeff,
                                                intptr_t block_size,
                                                int64_t *ssz, int bps) {
   assert(bps == 8);
-  return vp10_highbd_block_error_8bit_sse2(coeff, dqcoeff, block_size, ssz);
+  return av1_highbd_block_error_8bit_sse2(coeff, dqcoeff, block_size, ssz);
 }
 
 INSTANTIATE_TEST_CASE_P(
     SSE2, ErrorBlockTest,
-    ::testing::Values(make_tuple(&vp10_highbd_block_error_sse2,
-                                 &vp10_highbd_block_error_c, VPX_BITS_10),
-                      make_tuple(&vp10_highbd_block_error_sse2,
-                                 &vp10_highbd_block_error_c, VPX_BITS_12),
-                      make_tuple(&vp10_highbd_block_error_sse2,
-                                 &vp10_highbd_block_error_c, VPX_BITS_8),
-                      make_tuple(&wrap_vp10_highbd_block_error_8bit_sse2,
-                                 &wrap_vp10_highbd_block_error_8bit_c,
+    ::testing::Values(make_tuple(&av1_highbd_block_error_sse2,
+                                 &av1_highbd_block_error_c, VPX_BITS_10),
+                      make_tuple(&av1_highbd_block_error_sse2,
+                                 &av1_highbd_block_error_c, VPX_BITS_12),
+                      make_tuple(&av1_highbd_block_error_sse2,
+                                 &av1_highbd_block_error_c, VPX_BITS_8),
+                      make_tuple(&wrap_av1_highbd_block_error_8bit_sse2,
+                                 &wrap_av1_highbd_block_error_8bit_c,
                                  VPX_BITS_8)));
 #endif  // HAVE_SSE2
 
 #if HAVE_AVX
-int64_t wrap_vp10_highbd_block_error_8bit_avx(const tran_low_t *coeff,
+int64_t wrap_av1_highbd_block_error_8bit_avx(const tran_low_t *coeff,
                                               const tran_low_t *dqcoeff,
                                               intptr_t block_size, int64_t *ssz,
                                               int bps) {
   assert(bps == 8);
-  return vp10_highbd_block_error_8bit_avx(coeff, dqcoeff, block_size, ssz);
+  return av1_highbd_block_error_8bit_avx(coeff, dqcoeff, block_size, ssz);
 }
 
 INSTANTIATE_TEST_CASE_P(AVX, ErrorBlockTest,
                         ::testing::Values(make_tuple(
-                            &wrap_vp10_highbd_block_error_8bit_avx,
-                            &wrap_vp10_highbd_block_error_8bit_c, VPX_BITS_8)));
+                            &wrap_av1_highbd_block_error_8bit_avx,
+                            &wrap_av1_highbd_block_error_8bit_c, VPX_BITS_8)));
 #endif  // HAVE_AVX
 
 #endif  // CONFIG_USE_X86INC
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 }  // namespace
diff --git a/test/error_resilience_test.cc b/test/error_resilience_test.cc
index 8f39dffa79b245749bb7a0f5c236b18358c78db6..1b8d5952f8ada6f31b74e43cb4e2a6694ad0b6e7 100644
--- a/test/error_resilience_test.cc
+++ b/test/error_resilience_test.cc
@@ -570,7 +570,7 @@ TEST_P(ErrorResilienceTestLargeCodecControls, CodecControl3TemporalLayers) {
   }
 }
 
-// SVC-related tests don't run for VP10 since SVC is not supported.
-VP10_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES,
-                           ::testing::Values(false));
+// SVC-related tests don't run for AV1 since SVC is not supported.
+AV1_INSTANTIATE_TEST_CASE(ErrorResilienceTestLarge, ONE_PASS_TEST_MODES,
+                          ::testing::Values(false));
 }  // namespace
diff --git a/test/ethread_test.cc b/test/ethread_test.cc
index 2250361ab455e11c1e37524ecc33460f390a6959..aae62e9bedf52ee981071475bdfe2d7fceadb03a 100644
--- a/test/ethread_test.cc
+++ b/test/ethread_test.cc
@@ -124,7 +124,7 @@ TEST_P(VPxEncoderThreadTest, EncoderResultTest) {
   ASSERT_EQ(single_thr_md5, multi_thr_md5);
 }
 
-VP10_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
-                           ::testing::Values(::libaom_test::kTwoPassGood,
-                                             ::libaom_test::kOnePassGood),
-                           ::testing::Range(1, 3));
+AV1_INSTANTIATE_TEST_CASE(VPxEncoderThreadTest,
+                          ::testing::Values(::libaom_test::kTwoPassGood,
+                                            ::libaom_test::kOnePassGood),
+                          ::testing::Range(1, 3));
diff --git a/test/fdct4x4_test.cc b/test/fdct4x4_test.cc
index 0c4cafb6bf6ad9bcac146f6435b441a6925a12c6..81d0e380a5b0429b00b7f3bf711a111b7513d0e4 100644
--- a/test/fdct4x4_test.cc
+++ b/test/fdct4x4_test.cc
@@ -44,14 +44,14 @@ void fdct4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
 }
 
 void fht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht4x4_c(in, out, stride, tx_type);
+  av1_fht4x4_c(in, out, stride, tx_type);
 }
 
 void fwht4x4_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fwht4x4_c(in, out, stride);
+  av1_fwht4x4_c(in, out, stride);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct4x4_16_add_c(in, out, stride, 10);
 }
@@ -61,11 +61,11 @@ void idct4x4_12(const tran_low_t *in, uint8_t *out, int stride) {
 }
 
 void iht4x4_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
+  av1_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 10);
 }
 
 void iht4x4_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
+  av1_highbd_iht4x4_16_add_c(in, out, stride, tx_type, 12);
 }
 
 void iwht4x4_10(const tran_low_t *in, uint8_t *out, int stride) {
@@ -85,7 +85,7 @@ void idct4x4_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct4x4_16_add_sse2(in, out, stride, 12);
 }
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class Trans4x4TestBase {
  public:
@@ -106,7 +106,7 @@ class Trans4x4TestBase {
       DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
       DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -117,7 +117,7 @@ class Trans4x4TestBase {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -130,7 +130,7 @@ class Trans4x4TestBase {
           RunFwdTxfm(test_input_block, test_temp_block, pitch_));
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -138,7 +138,7 @@ class Trans4x4TestBase {
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -218,7 +218,7 @@ class Trans4x4TestBase {
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
 #endif
@@ -230,7 +230,7 @@ class Trans4x4TestBase {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -243,7 +243,7 @@ class Trans4x4TestBase {
 
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -251,7 +251,7 @@ class Trans4x4TestBase {
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -381,7 +381,7 @@ TEST_P(Trans4x4WHT, MemCheck) { RunMemCheck(); }
 TEST_P(Trans4x4WHT, InvAccuracyCheck) { RunInvAccuracyCheck(0); }
 using std::tr1::make_tuple;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4DCT,
     ::testing::Values(
@@ -393,82 +393,82 @@ INSTANTIATE_TEST_CASE_P(C, Trans4x4DCT,
                         ::testing::Values(make_tuple(&aom_fdct4x4_c,
                                                      &aom_idct4x4_16_add_c, 0,
                                                      VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 0, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 1, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 2, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_10, 3, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 0, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 1, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 2, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht4x4_c, &iht4x4_12, 3, VPX_BITS_12),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, VPX_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, Trans4x4WHT,
     ::testing::Values(
-        make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
-        make_tuple(&vp10_fwht4x4_c, &aom_iwht4x4_16_add_c, 0, VPX_BITS_8)));
+        make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_10, 0, VPX_BITS_10),
+        make_tuple(&av1_highbd_fwht4x4_c, &iwht4x4_12, 0, VPX_BITS_12),
+        make_tuple(&av1_fwht4x4_c, &aom_iwht4x4_16_add_c, 0, VPX_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(C, Trans4x4WHT,
-                        ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+                        ::testing::Values(make_tuple(&av1_fwht4x4_c,
                                                      &aom_iwht4x4_16_add_c, 0,
                                                      VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(NEON, Trans4x4DCT,
                         ::testing::Values(make_tuple(&aom_fdct4x4_c,
                                                      &aom_idct4x4_16_add_neon,
                                                      0, VPX_BITS_8)));
-#endif  // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_c, &vp10_iht4x4_16_add_neon, 3, VPX_BITS_8)));
-#endif  // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 0, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 1, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 2, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_c, &av1_iht4x4_16_add_neon, 3, VPX_BITS_8)));
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_VPX_HIGHBITDEPTH && \
+#if CONFIG_USE_X86INC && HAVE_MMX && !CONFIG_AOM_HIGHBITDEPTH && \
     !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MMX, Trans4x4WHT,
-                        ::testing::Values(make_tuple(&vp10_fwht4x4_mmx,
+                        ::testing::Values(make_tuple(&av1_fwht4x4_mmx,
                                                      &aom_iwht4x4_16_add_c, 0,
                                                      VPX_BITS_8)));
 #endif
 
-#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && \
+#if CONFIG_USE_X86INC && HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && \
     !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4WHT,
-                        ::testing::Values(make_tuple(&vp10_fwht4x4_c,
+                        ::testing::Values(make_tuple(&av1_fwht4x4_c,
                                                      &aom_iwht4x4_16_add_sse2,
                                                      0, VPX_BITS_8)));
 #endif
 
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
                         ::testing::Values(make_tuple(&aom_fdct4x4_sse2,
                                                      &aom_idct4x4_16_add_sse2,
@@ -476,14 +476,14 @@ INSTANTIATE_TEST_CASE_P(SSE2, Trans4x4DCT,
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_sse2, 3,
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 0, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 1, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 2, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_sse2, 3,
                    VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4DCT,
     ::testing::Values(
@@ -496,13 +496,13 @@ INSTANTIATE_TEST_CASE_P(
 INSTANTIATE_TEST_CASE_P(
     SSE2, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_sse2, &vp10_iht4x4_16_add_c, 3, VPX_BITS_8)));
-#endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_sse2, &av1_iht4x4_16_add_c, 3, VPX_BITS_8)));
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
                         ::testing::Values(make_tuple(&aom_fdct4x4_msa,
                                                      &aom_idct4x4_16_add_msa, 0,
@@ -510,9 +510,9 @@ INSTANTIATE_TEST_CASE_P(MSA, Trans4x4DCT,
 INSTANTIATE_TEST_CASE_P(
     MSA, Trans4x4HT,
     ::testing::Values(
-        make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht4x4_msa, &vp10_iht4x4_16_add_msa, 3, VPX_BITS_8)));
-#endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 0, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 1, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 2, VPX_BITS_8),
+        make_tuple(&av1_fht4x4_msa, &av1_iht4x4_16_add_msa, 3, VPX_BITS_8)));
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/fdct8x8_test.cc b/test/fdct8x8_test.cc
index 197f01d6a0647068ce2543a6e3d60b452f8fcb79..20a9f753b6afbee4922abd01b7623acbbceb66ec 100644
--- a/test/fdct8x8_test.cc
+++ b/test/fdct8x8_test.cc
@@ -81,10 +81,10 @@ void fdct8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
 }
 
 void fht8x8_ref(const int16_t *in, tran_low_t *out, int stride, int tx_type) {
-  vp10_fht8x8_c(in, out, stride, tx_type);
+  av1_fht8x8_c(in, out, stride, tx_type);
 }
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void idct8x8_10(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct8x8_64_add_c(in, out, stride, 10);
 }
@@ -94,11 +94,11 @@ void idct8x8_12(const tran_low_t *in, uint8_t *out, int stride) {
 }
 
 void iht8x8_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
+  av1_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 10);
 }
 
 void iht8x8_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
-  vp10_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
+  av1_highbd_iht8x8_64_add_c(in, out, stride, tx_type, 12);
 }
 
 void idct8x8_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
@@ -126,7 +126,7 @@ void idct8x8_64_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
   aom_highbd_idct8x8_64_add_sse2(in, out, stride, 12);
 }
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 class FwdTrans8x8TestBase {
  public:
@@ -211,7 +211,7 @@ class FwdTrans8x8TestBase {
     DECLARE_ALIGNED(16, tran_low_t, test_temp_block[64]);
     DECLARE_ALIGNED(16, uint8_t, dst[64]);
     DECLARE_ALIGNED(16, uint8_t, src[64]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[64]);
     DECLARE_ALIGNED(16, uint16_t, src16[64]);
 #endif
@@ -223,7 +223,7 @@ class FwdTrans8x8TestBase {
           src[j] = rnd.Rand8();
           dst[j] = rnd.Rand8();
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand16() & mask_;
           dst16[j] = rnd.Rand16() & mask_;
@@ -247,7 +247,7 @@ class FwdTrans8x8TestBase {
       }
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -255,7 +255,7 @@ class FwdTrans8x8TestBase {
       }
 
       for (int j = 0; j < 64; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -287,7 +287,7 @@ class FwdTrans8x8TestBase {
     DECLARE_ALIGNED(16, tran_low_t, ref_temp_block[64]);
     DECLARE_ALIGNED(16, uint8_t, dst[64]);
     DECLARE_ALIGNED(16, uint8_t, src[64]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[64]);
     DECLARE_ALIGNED(16, uint16_t, src16[64]);
 #endif
@@ -307,7 +307,7 @@ class FwdTrans8x8TestBase {
             dst[j] = rnd.Rand8() % 2 ? 255 : 0;
           }
           test_input_block[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           if (i == 0) {
             src16[j] = mask_;
@@ -330,7 +330,7 @@ class FwdTrans8x8TestBase {
           fwd_txfm_ref(test_input_block, ref_temp_block, pitch_, tx_type_));
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(test_temp_block, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -338,7 +338,7 @@ class FwdTrans8x8TestBase {
       }
 
       for (int j = 0; j < 64; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const int diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -373,7 +373,7 @@ class FwdTrans8x8TestBase {
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
 #endif
@@ -387,7 +387,7 @@ class FwdTrans8x8TestBase {
           src[j] = rnd.Rand8() % 2 ? 255 : 0;
           dst[j] = src[j] > 0 ? 0 : 255;
           in[j] = src[j] - dst[j];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           src16[j] = rnd.Rand8() % 2 ? mask_ : 0;
           dst16[j] = src16[j] > 0 ? 0 : mask_;
@@ -402,7 +402,7 @@ class FwdTrans8x8TestBase {
 
       if (bit_depth_ == VPX_BITS_8) {
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ASM_REGISTER_STATE_CHECK(
             RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16), pitch_));
@@ -410,7 +410,7 @@ class FwdTrans8x8TestBase {
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
 #else
@@ -458,11 +458,11 @@ class FwdTrans8x8TestBase {
     DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
     DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
 #endif
-    const int16_t *scan = vp10_default_scan_orders[TX_8X8].scan;
+    const int16_t *scan = av1_default_scan_orders[TX_8X8].scan;
 
     for (int i = 0; i < count_test_block; ++i) {
       for (int j = 0; j < kNumCoeffs; ++j) {
@@ -475,7 +475,7 @@ class FwdTrans8x8TestBase {
         if (bit_depth_ == VPX_BITS_8) {
           dst[j] = 0;
           ref[j] = 0;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           dst16[j] = 0;
           ref16[j] = 0;
@@ -485,7 +485,7 @@ class FwdTrans8x8TestBase {
       if (bit_depth_ == VPX_BITS_8) {
         ref_txfm(coeff, ref, pitch_);
         ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
         ASM_REGISTER_STATE_CHECK(
@@ -494,7 +494,7 @@ class FwdTrans8x8TestBase {
       }
 
       for (int j = 0; j < kNumCoeffs; ++j) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         const uint32_t diff =
             bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
 #else
@@ -620,7 +620,7 @@ TEST_P(InvTrans8x8DCT, CompareReference) {
 
 using std::tr1::make_tuple;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, FwdTrans8x8DCT,
     ::testing::Values(
@@ -632,52 +632,52 @@ INSTANTIATE_TEST_CASE_P(C, FwdTrans8x8DCT,
                         ::testing::Values(make_tuple(&aom_fdct8x8_c,
                                                      &aom_idct8x8_64_add_c, 0,
                                                      VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     C, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
-        make_tuple(&vp10_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 0, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 1, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 2, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_10, 3, VPX_BITS_10),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 0, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 1, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 2, VPX_BITS_12),
+        make_tuple(&av1_highbd_fht8x8_c, &iht8x8_12, 3, VPX_BITS_12),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, VPX_BITS_8)));
 #else
 INSTANTIATE_TEST_CASE_P(
     C, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_c, 3, VPX_BITS_8)));
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
-#if HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(NEON, FwdTrans8x8DCT,
                         ::testing::Values(make_tuple(&aom_fdct8x8_neon,
                                                      &aom_idct8x8_64_add_neon,
                                                      0, VPX_BITS_8)));
-#endif  // HAVE_NEON_ASM && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_NEON_ASM && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_c, &vp10_iht8x8_64_add_neon, 3, VPX_BITS_8)));
-#endif  // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 0, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 1, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 2, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_c, &av1_iht8x8_64_add_neon, 3, VPX_BITS_8)));
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
                         ::testing::Values(make_tuple(&aom_fdct8x8_sse2,
                                                      &aom_idct8x8_64_add_sse2,
@@ -685,14 +685,14 @@ INSTANTIATE_TEST_CASE_P(SSE2, FwdTrans8x8DCT,
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_sse2, 3,
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 0, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 1, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 2, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_sse2, 3,
                    VPX_BITS_8)));
-#endif  // HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8DCT,
     ::testing::Values(make_tuple(&aom_fdct8x8_sse2, &aom_idct8x8_64_add_c, 0,
@@ -709,10 +709,10 @@ INSTANTIATE_TEST_CASE_P(
 INSTANTIATE_TEST_CASE_P(
     SSE2, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_sse2, &vp10_iht8x8_64_add_c, 3, VPX_BITS_8)));
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 0, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 1, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 2, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_sse2, &av1_iht8x8_64_add_c, 3, VPX_BITS_8)));
 
 // Optimizations take effect at a threshold of 6201, so we use a value close to
 // that to test both branches.
@@ -725,17 +725,17 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&idct8x8_10_add_12_c, &idct8x8_10_add_12_sse2, 6225,
                    VPX_BITS_12),
         make_tuple(&idct8x8_12, &idct8x8_64_add_12_sse2, 6225, VPX_BITS_12)));
-#endif  // HAVE_SSE2 && CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_SSE2 && CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
-    !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(SSSE3, FwdTrans8x8DCT,
                         ::testing::Values(make_tuple(&aom_fdct8x8_ssse3,
                                                      &aom_idct8x8_64_add_ssse3,
                                                      0, VPX_BITS_8)));
 #endif
 
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
                         ::testing::Values(make_tuple(&aom_fdct8x8_msa,
                                                      &aom_idct8x8_64_add_msa, 0,
@@ -743,9 +743,9 @@ INSTANTIATE_TEST_CASE_P(MSA, FwdTrans8x8DCT,
 INSTANTIATE_TEST_CASE_P(
     MSA, FwdTrans8x8HT,
     ::testing::Values(
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 0, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 1, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 2, VPX_BITS_8),
-        make_tuple(&vp10_fht8x8_msa, &vp10_iht8x8_64_add_msa, 3, VPX_BITS_8)));
-#endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 0, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 1, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 2, VPX_BITS_8),
+        make_tuple(&av1_fht8x8_msa, &av1_iht8x8_64_add_msa, 3, VPX_BITS_8)));
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 }  // namespace
diff --git a/test/frame_size_tests.cc b/test/frame_size_tests.cc
index c1d916f68008ca86ffa6fec2767844f7b101f12d..df49a2cb0ed8a4b2bcd82de710744ea63d67f459 100644
--- a/test/frame_size_tests.cc
+++ b/test/frame_size_tests.cc
@@ -17,7 +17,7 @@ class VP9FrameSizeTestsLarge : public ::libaom_test::EncoderTest,
                                public ::testing::Test {
  protected:
   VP9FrameSizeTestsLarge()
-      : EncoderTest(&::libaom_test::kVP10), expected_res_(VPX_CODEC_OK) {}
+      : EncoderTest(&::libaom_test::kAV1), expected_res_(VPX_CODEC_OK) {}
   virtual ~VP9FrameSizeTestsLarge() {}
 
   virtual void SetUp() {
diff --git a/test/intrapred_test.cc b/test/intrapred_test.cc
index 545098a3c237a50ccc277ff8d47fb188b2a59ae2..0efd0f29d82a6a91e99f2e979b452e0c7087e72c 100644
--- a/test/intrapred_test.cc
+++ b/test/intrapred_test.cc
@@ -128,7 +128,7 @@ TEST_P(VP9IntraPredTest, IntraPredTests) {
 using std::tr1::make_tuple;
 
 #if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 #if CONFIG_USE_X86INC
 INSTANTIATE_TEST_CASE_P(
     SSE2_TO_C_8, VP9IntraPredTest,
@@ -212,6 +212,6 @@ INSTANTIATE_TEST_CASE_P(
                                  &aom_highbd_tm_predictor_8x8_c, 8, 12)));
 
 #endif  // CONFIG_USE_X86INC
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_SSE2
 }  // namespace
diff --git a/test/lossless_test.cc b/test/lossless_test.cc
index 316767e9ed6396e7963f2fe9c4af2c11d1bbe38f..53895e9339b99bde42f828ad80b6c258b4d7cf46 100644
--- a/test/lossless_test.cc
+++ b/test/lossless_test.cc
@@ -118,7 +118,7 @@ TEST_P(LosslessTest, TestLossLessEncodingCtrl) {
   EXPECT_GE(psnr_lossless, kMaxPsnr);
 }
 
-VP10_INSTANTIATE_TEST_CASE(LosslessTest,
-                           ::testing::Values(::libaom_test::kOnePassGood,
-                                             ::libaom_test::kTwoPassGood));
+AV1_INSTANTIATE_TEST_CASE(LosslessTest,
+                          ::testing::Values(::libaom_test::kOnePassGood,
+                                            ::libaom_test::kTwoPassGood));
 }  // namespace
diff --git a/test/lpf_8_test.cc b/test/lpf_8_test.cc
index 594eae99297d71bb0c6dd0f8c3f4dc8f844e2d0b..5a41c1bd950d40296a08f02823c36b85701ef826 100644
--- a/test/lpf_8_test.cc
+++ b/test/lpf_8_test.cc
@@ -34,7 +34,7 @@ const int kNumCoeffs = 1024;
 
 const int number_of_iterations = 10000;
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef void (*loop_op_t)(uint16_t *s, int p, const uint8_t *blimit,
                           const uint8_t *limit, const uint8_t *thresh,
                           int count, int bd);
@@ -50,13 +50,13 @@ typedef void (*dual_loop_op_t)(uint8_t *s, int p, const uint8_t *blimit0,
                                const uint8_t *limit0, const uint8_t *thresh0,
                                const uint8_t *blimit1, const uint8_t *limit1,
                                const uint8_t *thresh1);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 typedef std::tr1::tuple<loop_op_t, loop_op_t, int, int> loop8_param_t;
 typedef std::tr1::tuple<dual_loop_op_t, dual_loop_op_t, int> dualloop8_param_t;
 
 #if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void wrapper_vertical_16_sse2(uint16_t *s, int p, const uint8_t *blimit,
                               const uint8_t *limit, const uint8_t *thresh,
                               int count, int bd) {
@@ -104,11 +104,11 @@ void wrapper_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
                                 int count) {
   aom_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_SSE2
 
 #if HAVE_NEON_ASM
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // No neon high bitdepth functions.
 #else
 void wrapper_vertical_16_neon(uint8_t *s, int p, const uint8_t *blimit,
@@ -134,10 +134,10 @@ void wrapper_vertical_16_dual_c(uint8_t *s, int p, const uint8_t *blimit,
                                 int count) {
   aom_lpf_vertical_16_dual_c(s, p, blimit, limit, thresh);
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_NEON_ASM
 
-#if HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#if HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
 void wrapper_vertical_16_msa(uint8_t *s, int p, const uint8_t *blimit,
                              const uint8_t *limit, const uint8_t *thresh,
                              int count) {
@@ -149,7 +149,7 @@ void wrapper_vertical_16_c(uint8_t *s, int p, const uint8_t *blimit,
                            int count) {
   aom_lpf_vertical_16_c(s, p, blimit, limit, thresh);
 }
-#endif  // HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#endif  // HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
 
 class Loop8Test6Param : public ::testing::TestWithParam<loop8_param_t> {
  public:
@@ -194,14 +194,14 @@ class Loop8Test9Param : public ::testing::TestWithParam<dualloop8_param_t> {
 TEST_P(Loop8Test6Param, OperationCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   int32_t bd = bit_depth_;
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
@@ -249,7 +249,7 @@ TEST_P(Loop8Test6Param, OperationCheck) {
       }
       ref_s[j] = s[j];
     }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_, bd);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd));
@@ -257,7 +257,7 @@ TEST_P(Loop8Test6Param, OperationCheck) {
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
@@ -276,14 +276,14 @@ TEST_P(Loop8Test6Param, OperationCheck) {
 TEST_P(Loop8Test6Param, ValueCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int32_t bd = bit_depth_;
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
 
@@ -318,7 +318,7 @@ TEST_P(Loop8Test6Param, ValueCheck) {
       s[j] = rnd.Rand16() & mask_;
       ref_s[j] = s[j];
     }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_, bd);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_, bd));
@@ -326,7 +326,7 @@ TEST_P(Loop8Test6Param, ValueCheck) {
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit, limit, thresh, count_);
     ASM_REGISTER_STATE_CHECK(
         loopfilter_op_(s + 8 + p * 8, p, blimit, limit, thresh, count_));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
     }
@@ -344,14 +344,14 @@ TEST_P(Loop8Test6Param, ValueCheck) {
 TEST_P(Loop8Test9Param, OperationCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   const int32_t bd = bit_depth_;
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
@@ -411,7 +411,7 @@ TEST_P(Loop8Test9Param, OperationCheck) {
       }
       ref_s[j] = s[j];
     }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
                        limit1, thresh1, bd);
     ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
@@ -422,7 +422,7 @@ TEST_P(Loop8Test9Param, OperationCheck) {
                        limit1, thresh1);
     ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
                                             thresh0, blimit1, limit1, thresh1));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
     }
@@ -440,13 +440,13 @@ TEST_P(Loop8Test9Param, OperationCheck) {
 TEST_P(Loop8Test9Param, ValueCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
   const int count_test_block = number_of_iterations;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   DECLARE_ALIGNED(16, uint16_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(16, uint16_t, ref_s[kNumCoeffs]);
 #else
   DECLARE_ALIGNED(8, uint8_t, s[kNumCoeffs]);
   DECLARE_ALIGNED(8, uint8_t, ref_s[kNumCoeffs]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   int err_count_total = 0;
   int first_failure = -1;
   for (int i = 0; i < count_test_block; ++i) {
@@ -480,7 +480,7 @@ TEST_P(Loop8Test9Param, ValueCheck) {
       s[j] = rnd.Rand16() & mask_;
       ref_s[j] = s[j];
     }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const int32_t bd = bit_depth_;
     ref_loopfilter_op_(ref_s + 8 + p * 8, p, blimit0, limit0, thresh0, blimit1,
                        limit1, thresh1, bd);
@@ -492,7 +492,7 @@ TEST_P(Loop8Test9Param, ValueCheck) {
                        limit1, thresh1);
     ASM_REGISTER_STATE_CHECK(loopfilter_op_(s + 8 + p * 8, p, blimit0, limit0,
                                             thresh0, blimit1, limit1, thresh1));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int j = 0; j < kNumCoeffs; ++j) {
       err_count += ref_s[j] != s[j];
     }
@@ -510,7 +510,7 @@ TEST_P(Loop8Test9Param, ValueCheck) {
 using std::tr1::make_tuple;
 
 #if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test6Param,
     ::testing::Values(
@@ -570,10 +570,10 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(&wrapper_vertical_16_sse2, &wrapper_vertical_16_c, 8, 1),
         make_tuple(&wrapper_vertical_16_dual_sse2, &wrapper_vertical_16_dual_c,
                    8, 1)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif
 
-#if HAVE_AVX2 && (!CONFIG_VPX_HIGHBITDEPTH)
+#if HAVE_AVX2 && (!CONFIG_AOM_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     AVX2, Loop8Test6Param,
     ::testing::Values(make_tuple(&aom_lpf_horizontal_16_avx2,
@@ -583,7 +583,7 @@ INSTANTIATE_TEST_CASE_P(
 #endif
 
 #if HAVE_SSE2
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     SSE2, Loop8Test9Param,
     ::testing::Values(make_tuple(&aom_highbd_lpf_horizontal_4_dual_sse2,
@@ -621,11 +621,11 @@ INSTANTIATE_TEST_CASE_P(
                                  &aom_lpf_vertical_4_dual_c, 8),
                       make_tuple(&aom_lpf_vertical_8_dual_sse2,
                                  &aom_lpf_vertical_8_dual_c, 8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif
 
 #if HAVE_NEON
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 // No neon high bitdepth functions.
 #else
 INSTANTIATE_TEST_CASE_P(
@@ -657,10 +657,10 @@ INSTANTIATE_TEST_CASE_P(NEON, Loop8Test9Param,
                                        &aom_lpf_horizontal_4_dual_c, 8),
                             make_tuple(&aom_lpf_vertical_4_dual_neon,
                                        &aom_lpf_vertical_4_dual_c, 8)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_NEON
 
-#if HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#if HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
 INSTANTIATE_TEST_CASE_P(
     MSA, Loop8Test6Param,
     ::testing::Values(
@@ -680,6 +680,6 @@ INSTANTIATE_TEST_CASE_P(
                                  &aom_lpf_vertical_4_dual_c, 8),
                       make_tuple(&aom_lpf_vertical_8_dual_msa,
                                  &aom_lpf_vertical_8_dual_c, 8)));
-#endif  // HAVE_MSA && (!CONFIG_VPX_HIGHBITDEPTH)
+#endif  // HAVE_MSA && (!CONFIG_AOM_HIGHBITDEPTH)
 
 }  // namespace
diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index a62afc077c4c625b11729cba30b4970ee11d40d2..94631f6af42c117195c47c5ab23658324dfea765 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -101,7 +101,7 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
       // quantization with maximum allowed step sizes
       test_coef_block1[0] = (output_ref_block[0] / 1336) * 1336;
       for (int j = 1; j < last_nonzero_; ++j)
-        test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] =
+        test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] =
             (output_ref_block[j] / 1828) * 1828;
     }
 
@@ -152,7 +152,7 @@ TEST_P(PartialIDctTest, ResultsMatch) {
         max_energy_leftover = 0;
         coef = 0;
       }
-      test_coef_block1[vp10_default_scan_orders[tx_size_].scan[j]] = coef;
+      test_coef_block1[av1_default_scan_orders[tx_size_].scan[j]] = coef;
     }
 
     memcpy(test_coef_block2, test_coef_block1,
@@ -190,7 +190,7 @@ INSTANTIATE_TEST_CASE_P(
                       make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
                                  &aom_idct4x4_1_add_c, TX_4X4, 1)));
 
-#if HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     NEON, PartialIDctTest,
     ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
@@ -205,9 +205,9 @@ INSTANTIATE_TEST_CASE_P(
                                  &aom_idct8x8_1_add_neon, TX_8X8, 1),
                       make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
                                  &aom_idct4x4_1_add_neon, TX_4X4, 1)));
-#endif  // HAVE_NEON && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_NEON && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSE2, PartialIDctTest,
     ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
@@ -227,14 +227,14 @@ INSTANTIATE_TEST_CASE_P(
 #endif
 
 #if HAVE_SSSE3 && CONFIG_USE_X86INC && ARCH_X86_64 && \
-    !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+    !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSSE3_64, PartialIDctTest,
     ::testing::Values(make_tuple(&aom_fdct8x8_c, &aom_idct8x8_64_add_c,
                                  &aom_idct8x8_12_add_ssse3, TX_8X8, 12)));
 #endif
 
-#if HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     MSA, PartialIDctTest,
     ::testing::Values(make_tuple(&aom_fdct32x32_c, &aom_idct32x32_1024_add_c,
@@ -251,6 +251,6 @@ INSTANTIATE_TEST_CASE_P(
                                  &aom_idct8x8_1_add_msa, TX_8X8, 1),
                       make_tuple(&aom_fdct4x4_c, &aom_idct4x4_16_add_c,
                                  &aom_idct4x4_1_add_msa, TX_4X4, 1)));
-#endif  // HAVE_MSA && !CONFIG_VPX_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#endif  // HAVE_MSA && !CONFIG_AOM_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
 
 }  // namespace
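The RunQuantCheck hunk above coerces the reference coefficients onto the coarsest quantizer step with the `(x / step) * step` idiom, which truncates toward zero. A minimal sketch of that snapping outside the test harness; the step sizes 1336 and 1828 are the values from the hunk, everything else is illustrative.

```cpp
#include <cstdint>
#include <cstdio>

// Snap a coefficient to a multiple of `step`, truncating toward zero -- the
// same (x / step) * step idiom the partial IDCT test uses to emulate
// quantization at the maximum allowed step sizes.
static int32_t snap_to_step(int32_t coeff, int32_t step) {
  return (coeff / step) * step;
}

int main() {
  printf("%d -> %d\n", 2000, snap_to_step(2000, 1336));    // DC step: 1336
  printf("%d -> %d\n", -2000, snap_to_step(-2000, 1828));  // AC step: -1828
  return 0;
}
```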
diff --git a/test/quantize_test.cc b/test/quantize_test.cc
index 29bd48d42f578ac158d04676e49c8ed006a1888e..0418509d3a20095d77ae06d96b70e334cb9c87c6 100644
--- a/test/quantize_test.cc
+++ b/test/quantize_test.cc
@@ -29,7 +29,7 @@ using libaom_test::ACMRandom;
 
 namespace {
 #if !CONFIG_AOM_QM
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 const int number_of_iterations = 100;
 
 typedef void (*QuantizeFunc)(const tran_low_t *coeff, intptr_t count,
@@ -100,7 +100,7 @@ TEST_P(VP9QuantizeTest, OperationCheck) {
     const int skip_block = i == 0;
     const TX_SIZE sz = (TX_SIZE)(i % 3);  // TX_4X4, TX_8X8 TX_16X16
     const TX_TYPE tx_type = (TX_TYPE)((i >> 2) % 3);
-    const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+    const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
     const int count = (4 << sz) * (4 << sz);  // 16, 64, 256
     int err_count = 0;
     *eob_ptr = rnd.Rand16();
@@ -158,7 +158,7 @@ TEST_P(VP9Quantize32Test, OperationCheck) {
     const int skip_block = i == 0;
     const TX_SIZE sz = TX_32X32;
     const TX_TYPE tx_type = (TX_TYPE)(i % 4);
-    const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+    const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
     const int count = (4 << sz) * (4 << sz);  // 1024
     int err_count = 0;
     *eob_ptr = rnd.Rand16();
@@ -216,7 +216,7 @@ TEST_P(VP9QuantizeTest, EOBCheck) {
     int skip_block = i == 0;
     TX_SIZE sz = (TX_SIZE)(i % 3);  // TX_4X4, TX_8X8 TX_16X16
     TX_TYPE tx_type = (TX_TYPE)((i >> 2) % 3);
-    const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+    const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
     int count = (4 << sz) * (4 << sz);  // 16, 64, 256
     int err_count = 0;
     *eob_ptr = rnd.Rand16();
@@ -279,7 +279,7 @@ TEST_P(VP9Quantize32Test, EOBCheck) {
     int skip_block = i == 0;
     TX_SIZE sz = TX_32X32;
     TX_TYPE tx_type = (TX_TYPE)(i % 4);
-    const scan_order *scan_order = &vp10_scan_orders[sz][tx_type];
+    const scan_order *scan_order = &av1_scan_orders[sz][tx_type];
     int count = (4 << sz) * (4 << sz);  // 1024
     int err_count = 0;
     *eob_ptr = rnd.Rand16();
@@ -341,6 +341,6 @@ INSTANTIATE_TEST_CASE_P(
                       make_tuple(&aom_highbd_quantize_b_32x32_sse2,
                                  &aom_highbd_quantize_b_32x32_c, VPX_BITS_12)));
 #endif  // HAVE_SSE2
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // CONFIG_AOM_QM
 }  // namespace
diff --git a/test/register_state_check.h b/test/register_state_check.h
index 1c7b855b3066d6304548ee17c06a62f0beea89d7..7481e4d267ddc54b5a54e24c1aaeadf4a8e78109 100644
--- a/test/register_state_check.h
+++ b/test/register_state_check.h
@@ -96,7 +96,7 @@ class RegisterStateCheck {
 }  // namespace libaom_test
 
 #elif defined(CONFIG_SHARED) && defined(HAVE_NEON_ASM) && !CONFIG_SHARED && \
-    HAVE_NEON_ASM && CONFIG_VP10
+    HAVE_NEON_ASM && CONFIG_AV1
 
 extern "C" {
 // Save the d8-d15 registers into store.
diff --git a/test/resize_test.cc b/test/resize_test.cc
index 41b4a7bb99f75efc5b5498731a51a412a41bddac..8ddd08cf58d029fab091033c5d20e19062fc94bc 100644
--- a/test/resize_test.cc
+++ b/test/resize_test.cc
@@ -524,13 +524,13 @@ TEST_P(ResizeCspTest, TestResizeCspWorks) {
   ASSERT_NO_FATAL_FAILURE(RunLoop(&video));
 }
 
-VP10_INSTANTIATE_TEST_CASE(ResizeTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeTest,
                            ::testing::Values(::libaom_test::kRealTime));
-VP10_INSTANTIATE_TEST_CASE(ResizeInternalTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeInternalTest,
                            ::testing::Values(::libaom_test::kOnePassBest));
-VP10_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeRealtimeTest,
                            ::testing::Values(::libaom_test::kRealTime),
                            ::testing::Range(5, 9));
-VP10_INSTANTIATE_TEST_CASE(ResizeCspTest,
+AV1_INSTANTIATE_TEST_CASE(ResizeCspTest,
                            ::testing::Values(::libaom_test::kRealTime));
 }  // namespace
diff --git a/test/sad_test.cc b/test/sad_test.cc
index b450458cc67904bb6c49717c680da824790dd61f..218d02c0169703a96c38f30cb13797e3f586446a 100644
--- a/test/sad_test.cc
+++ b/test/sad_test.cc
@@ -91,14 +91,14 @@ class SADTestBase : public ::testing::Test {
       source_data_ = source_data8_;
       reference_data_ = reference_data8_;
       second_pred_ = second_pred8_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       use_high_bit_depth_ = true;
       bit_depth_ = static_cast<aom_bit_depth_t>(bd_);
       source_data_ = CONVERT_TO_BYTEPTR(source_data16_);
       reference_data_ = CONVERT_TO_BYTEPTR(reference_data16_);
       second_pred_ = CONVERT_TO_BYTEPTR(second_pred16_);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     mask_ = (1 << bit_depth_) - 1;
     source_stride_ = (width_ + 31) & ~31;
@@ -107,11 +107,11 @@ class SADTestBase : public ::testing::Test {
   }
 
   virtual uint8_t *GetReference(int block_idx) {
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     if (use_high_bit_depth_)
       return CONVERT_TO_BYTEPTR(CONVERT_TO_SHORTPTR(reference_data_) +
                                 block_idx * kDataBlockSize);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     return reference_data_ + block_idx * kDataBlockSize;
   }
 
@@ -121,21 +121,21 @@ class SADTestBase : public ::testing::Test {
     unsigned int sad = 0;
     const uint8_t *const reference8 = GetReference(block_idx);
     const uint8_t *const source8 = source_data_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint16_t *const reference16 =
         CONVERT_TO_SHORTPTR(GetReference(block_idx));
     const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           sad += abs(source8[h * source_stride_ + w] -
                      reference8[h * reference_stride_ + w]);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           sad += abs(source16[h * source_stride_ + w] -
                      reference16[h * reference_stride_ + w]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -150,12 +150,12 @@ class SADTestBase : public ::testing::Test {
     const uint8_t *const reference8 = GetReference(block_idx);
     const uint8_t *const source8 = source_data_;
     const uint8_t *const second_pred8 = second_pred_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     const uint16_t *const reference16 =
         CONVERT_TO_SHORTPTR(GetReference(block_idx));
     const uint16_t *const source16 = CONVERT_TO_SHORTPTR(source_data_);
     const uint16_t *const second_pred16 = CONVERT_TO_SHORTPTR(second_pred_);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
@@ -163,13 +163,13 @@ class SADTestBase : public ::testing::Test {
                           reference8[h * reference_stride_ + w];
           const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
           sad += abs(source8[h * source_stride_ + w] - comp_pred);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           const int tmp = second_pred16[h * width_ + w] +
                           reference16[h * reference_stride_ + w];
           const uint16_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
           sad += abs(source16[h * source_stride_ + w] - comp_pred);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -178,17 +178,17 @@ class SADTestBase : public ::testing::Test {
 
   void FillConstant(uint8_t *data, int stride, uint16_t fill_constant) {
     uint8_t *data8 = data;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           data8[h * stride + w] = static_cast<uint8_t>(fill_constant);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           data16[h * stride + w] = fill_constant;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -196,17 +196,17 @@ class SADTestBase : public ::testing::Test {
 
   void FillRandom(uint8_t *data, int stride) {
     uint8_t *data8 = data;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     uint16_t *data16 = CONVERT_TO_SHORTPTR(data);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     for (int h = 0; h < height_; ++h) {
       for (int w = 0; w < width_; ++w) {
         if (!use_high_bit_depth_) {
           data8[h * stride + w] = rnd_.Rand8();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
         } else {
           data16[h * stride + w] = rnd_.Rand16() & mask_;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
         }
       }
     }
@@ -485,7 +485,7 @@ const SadMxNParam c_tests[] = {
   make_tuple(8, 4, &aom_sad8x4_c, -1),
   make_tuple(4, 8, &aom_sad4x8_c, -1),
   make_tuple(4, 4, &aom_sad4x4_c, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   make_tuple(64, 64, &aom_highbd_sad64x64_c, 8),
   make_tuple(64, 32, &aom_highbd_sad64x32_c, 8),
   make_tuple(32, 64, &aom_highbd_sad32x64_c, 8),
@@ -525,7 +525,7 @@ const SadMxNParam c_tests[] = {
   make_tuple(8, 4, &aom_highbd_sad8x4_c, 12),
   make_tuple(4, 8, &aom_highbd_sad4x8_c, 12),
   make_tuple(4, 4, &aom_highbd_sad4x4_c, 12),
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADTest, ::testing::ValuesIn(c_tests));
 
@@ -543,7 +543,7 @@ const SadMxNAvgParam avg_c_tests[] = {
   make_tuple(8, 4, &aom_sad8x4_avg_c, -1),
   make_tuple(4, 8, &aom_sad4x8_avg_c, -1),
   make_tuple(4, 4, &aom_sad4x4_avg_c, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   make_tuple(64, 64, &aom_highbd_sad64x64_avg_c, 8),
   make_tuple(64, 32, &aom_highbd_sad64x32_avg_c, 8),
   make_tuple(32, 64, &aom_highbd_sad32x64_avg_c, 8),
@@ -583,7 +583,7 @@ const SadMxNAvgParam avg_c_tests[] = {
   make_tuple(8, 4, &aom_highbd_sad8x4_avg_c, 12),
   make_tuple(4, 8, &aom_highbd_sad4x8_avg_c, 12),
   make_tuple(4, 4, &aom_highbd_sad4x4_avg_c, 12),
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADavgTest, ::testing::ValuesIn(avg_c_tests));
 
@@ -601,7 +601,7 @@ const SadMxNx4Param x4d_c_tests[] = {
   make_tuple(8, 4, &aom_sad8x4x4d_c, -1),
   make_tuple(4, 8, &aom_sad4x8x4d_c, -1),
   make_tuple(4, 4, &aom_sad4x4x4d_c, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   make_tuple(64, 64, &aom_highbd_sad64x64x4d_c, 8),
   make_tuple(64, 32, &aom_highbd_sad64x32x4d_c, 8),
   make_tuple(32, 64, &aom_highbd_sad32x64x4d_c, 8),
@@ -641,7 +641,7 @@ const SadMxNx4Param x4d_c_tests[] = {
   make_tuple(8, 4, &aom_highbd_sad8x4x4d_c, 12),
   make_tuple(4, 8, &aom_highbd_sad4x8x4d_c, 12),
   make_tuple(4, 4, &aom_highbd_sad4x4x4d_c, 12),
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(C, SADx4Test, ::testing::ValuesIn(x4d_c_tests));
 
@@ -713,7 +713,7 @@ const SadMxNParam sse2_tests[] = {
   make_tuple(8, 4, &aom_sad8x4_sse2, -1),
   make_tuple(4, 8, &aom_sad4x8_sse2, -1),
   make_tuple(4, 4, &aom_sad4x4_sse2, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   make_tuple(64, 64, &aom_highbd_sad64x64_sse2, 8),
   make_tuple(64, 32, &aom_highbd_sad64x32_sse2, 8),
   make_tuple(32, 64, &aom_highbd_sad32x64_sse2, 8),
@@ -747,7 +747,7 @@ const SadMxNParam sse2_tests[] = {
   make_tuple(8, 16, &aom_highbd_sad8x16_sse2, 12),
   make_tuple(8, 8, &aom_highbd_sad8x8_sse2, 12),
   make_tuple(8, 4, &aom_highbd_sad8x4_sse2, 12),
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADTest, ::testing::ValuesIn(sse2_tests));
 
@@ -765,7 +765,7 @@ const SadMxNAvgParam avg_sse2_tests[] = {
   make_tuple(8, 4, &aom_sad8x4_avg_sse2, -1),
   make_tuple(4, 8, &aom_sad4x8_avg_sse2, -1),
   make_tuple(4, 4, &aom_sad4x4_avg_sse2, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   make_tuple(64, 64, &aom_highbd_sad64x64_avg_sse2, 8),
   make_tuple(64, 32, &aom_highbd_sad64x32_avg_sse2, 8),
   make_tuple(32, 64, &aom_highbd_sad32x64_avg_sse2, 8),
@@ -799,7 +799,7 @@ const SadMxNAvgParam avg_sse2_tests[] = {
   make_tuple(8, 16, &aom_highbd_sad8x16_avg_sse2, 12),
   make_tuple(8, 8, &aom_highbd_sad8x8_avg_sse2, 12),
   make_tuple(8, 4, &aom_highbd_sad8x4_avg_sse2, 12),
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADavgTest, ::testing::ValuesIn(avg_sse2_tests));
 
@@ -815,7 +815,7 @@ const SadMxNx4Param x4d_sse2_tests[] = {
   make_tuple(8, 16, &aom_sad8x16x4d_sse2, -1),
   make_tuple(8, 8, &aom_sad8x8x4d_sse2, -1),
   make_tuple(8, 4, &aom_sad8x4x4d_sse2, -1),
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   make_tuple(64, 64, &aom_highbd_sad64x64x4d_sse2, 8),
   make_tuple(64, 32, &aom_highbd_sad64x32x4d_sse2, 8),
   make_tuple(32, 64, &aom_highbd_sad32x64x4d_sse2, 8),
@@ -855,7 +855,7 @@ const SadMxNx4Param x4d_sse2_tests[] = {
   make_tuple(8, 4, &aom_highbd_sad8x4x4d_sse2, 12),
   make_tuple(4, 8, &aom_highbd_sad4x8x4d_sse2, 12),
   make_tuple(4, 4, &aom_highbd_sad4x4x4d_sse2, 12),
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 };
 INSTANTIATE_TEST_CASE_P(SSE2, SADx4Test, ::testing::ValuesIn(x4d_sse2_tests));
 #endif  // CONFIG_USE_X86INC
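The averaged-SAD reference hunks above combine the second predictor with the reference via `ROUND_POWER_OF_TWO(tmp, 1)` before taking the absolute difference against the source. A minimal standalone sketch of the 8-bit path, assuming row-major buffers with explicit strides; the rounding macro is reproduced from its usual libaom definition rather than quoted from this patch.

```cpp
#include <cstdint>
#include <cstdlib>

// Rounding right shift; for n == 1 this is the (a + b + 1) >> 1 average the
// reference implementation applies to ref and second_pred.
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

// Reference SAD against the average of `ref` and a width-strided
// `second_pred`, mirroring the structure of the 8-bit branch above.
static unsigned int sad_avg_ref(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                const uint8_t *second_pred, int width,
                                int height) {
  unsigned int sad = 0;
  for (int h = 0; h < height; ++h) {
    for (int w = 0; w < width; ++w) {
      const int tmp = second_pred[h * width + w] + ref[h * ref_stride + w];
      const uint8_t comp_pred = ROUND_POWER_OF_TWO(tmp, 1);
      sad += abs(src[h * src_stride + w] - comp_pred);
    }
  }
  return sad;
}
```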
diff --git a/test/superframe_test.cc b/test/superframe_test.cc
index 40d4f6f8cc3668adcf0ef031e2621e5122250ddd..19ebcfd1006722e4bf3aa355766cf32f08b119dd 100644
--- a/test/superframe_test.cc
+++ b/test/superframe_test.cc
@@ -37,7 +37,7 @@ class SuperframeTest
     SetMode(mode);
     sf_count_ = 0;
     sf_count_max_ = INT_MAX;
-    is_vp10_style_superframe_ = syntax;
+    is_av1_style_superframe_ = syntax;
   }
 
   virtual void TearDown() { delete[] modified_buf_; }
@@ -58,7 +58,7 @@ class SuperframeTest
     const int frames = (marker & 0x7) + 1;
     const int mag = ((marker >> 3) & 3) + 1;
     const unsigned int index_sz =
-        2 + mag * (frames - is_vp10_style_superframe_);
+        2 + mag * (frames - is_av1_style_superframe_);
     if ((marker & 0xe0) == 0xc0 && pkt->data.frame.sz >= index_sz &&
         buffer[pkt->data.frame.sz - index_sz] == marker) {
       // frame is a superframe. strip off the index.
@@ -80,7 +80,7 @@ class SuperframeTest
     return pkt;
   }
 
-  int is_vp10_style_superframe_;
+  int is_av1_style_superframe_;
   int sf_count_;
   int sf_count_max_;
   aom_codec_cx_pkt_t modified_pkt_;
@@ -98,7 +98,7 @@ TEST_P(SuperframeTest, TestSuperframeIndexIsOptional) {
   EXPECT_EQ(sf_count_, 1);
 }
 
-VP10_INSTANTIATE_TEST_CASE(
+AV1_INSTANTIATE_TEST_CASE(
     SuperframeTest,
     ::testing::Combine(::testing::Values(::libaom_test::kTwoPassGood),
                        ::testing::Values(CONFIG_MISC_FIXES)));
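The superframe hunks above key off the trailing marker byte: the low three bits hold the frame count minus one, bits 3-4 hold the size-field magnitude minus one, and the top three bits must be 0b110; with the AV1-style flag set, the index carries one fewer size entry. A minimal marker decoder under exactly those rules, with `is_av1_style_superframe_` modeled as a plain bool; it interprets the marker only and does not walk the index.

```cpp
#include <cstdint>
#include <cstdio>

// Superframe marker layout used by the test above:
//   frames = (marker & 0x7) + 1
//   mag    = ((marker >> 3) & 0x3) + 1   (bytes per frame-size entry)
//   valid only when the top three bits are 0b110, i.e. (marker & 0xe0) == 0xc0
struct SuperframeIndexInfo {
  bool valid;
  int frames;
  int mag;
  unsigned index_size;  // total index size in bytes
};

static SuperframeIndexInfo parse_marker(uint8_t marker, bool av1_style) {
  SuperframeIndexInfo info = { false, 0, 0, 0 };
  if ((marker & 0xe0) != 0xc0) return info;  // not a superframe marker
  info.valid = true;
  info.frames = (marker & 0x7) + 1;
  info.mag = ((marker >> 3) & 0x3) + 1;
  // AV1-style indices store one fewer size entry, per the hunk above.
  info.index_size = 2 + info.mag * (info.frames - (av1_style ? 1 : 0));
  return info;
}

int main() {
  // 0xc9: marker bits 110, 2-byte size entries, two frames.
  const SuperframeIndexInfo info = parse_marker(0xc9, /*av1_style=*/true);
  if (info.valid)
    printf("frames=%d mag=%d index_size=%u\n", info.frames, info.mag,
           info.index_size);
  return 0;
}
```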
diff --git a/test/test-data.mk b/test/test-data.mk
index 9faa4f7b68d59bdfeb8ab29eafe82c20c46844e8..7b174fba64a7ba41f234fc936c7e4018b8ce3b31 100644
--- a/test/test-data.mk
+++ b/test/test-data.mk
@@ -18,10 +18,10 @@ LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_422.y4m
 LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_444.y4m
 LIBAOM_TEST_DATA-$(CONFIG_ENCODERS) += park_joy_90p_8_440.yuv
 
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += desktop_credits.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += niklas_1280_720_30.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += rush_hour_444.y4m
-LIBAOM_TEST_DATA-$(CONFIG_VP10_ENCODER) += screendata.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += desktop_credits.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += niklas_1280_720_30.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += rush_hour_444.y4m
+LIBAOM_TEST_DATA-$(CONFIG_AV1_ENCODER) += screendata.y4m
 
 # sort and remove duplicates
 LIBAOM_TEST_DATA-yes := $(sort $(LIBAOM_TEST_DATA-yes))
diff --git a/test/test.mk b/test/test.mk
index a0843cb38d5d0642c1d05e2940926fa11bdfa254..b23c3ce249b1797e9aa43bb7283cc0aaa1f71114 100644
--- a/test/test.mk
+++ b/test/test.mk
@@ -26,14 +26,14 @@ LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS)    += i420_video_source.h
 LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS)    += y4m_video_source.h
 LIBAOM_TEST_SRCS-$(CONFIG_ENCODERS)    += yuv_video_source.h
 
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += active_map_refresh_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += active_map_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += borders_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += cpu_speed_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += frame_size_tests.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += lossless_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += end_to_end_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += ethread_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += active_map_refresh_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += active_map_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += borders_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += cpu_speed_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += frame_size_tests.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += lossless_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += end_to_end_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += ethread_test.cc
 
 LIBAOM_TEST_SRCS-yes                   += decode_test_driver.cc
 LIBAOM_TEST_SRCS-yes                   += decode_test_driver.h
@@ -63,13 +63,13 @@ LIBAOM_TEST_SRCS-$(CONFIG_DECODERS)    += decode_api_test.cc
 
 # Currently we only support decoder perf tests for vp9. Also they read from WebM
 # files, so WebM IO is required.
-ifeq ($(CONFIG_DECODE_PERF_TESTS)$(CONFIG_VP10_DECODER)$(CONFIG_WEBM_IO), \
+ifeq ($(CONFIG_DECODE_PERF_TESTS)$(CONFIG_AV1_DECODER)$(CONFIG_WEBM_IO), \
       yesyesyes)
 LIBAOM_TEST_SRCS-yes                   += decode_perf_test.cc
 endif
 
 # encode perf tests are vp9 only
-ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_VP10_ENCODER), yesyes)
+ifeq ($(CONFIG_ENCODE_PERF_TESTS)$(CONFIG_AV1_ENCODER), yesyes)
 LIBAOM_TEST_SRCS-yes += encode_perf_test.cc
 endif
 
@@ -81,11 +81,11 @@ endif
 ##
 ifeq ($(CONFIG_SHARED),)
 
-## VP10
-ifeq ($(CONFIG_VP10),yes)
+## AV1
+ifeq ($(CONFIG_AV1),yes)
 
 # These tests require both the encoder and decoder to be built.
-ifeq ($(CONFIG_VP10_ENCODER)$(CONFIG_VP10_DECODER),yesyes)
+ifeq ($(CONFIG_AV1_ENCODER)$(CONFIG_AV1_DECODER),yesyes)
 # IDCT test currently depends on FDCT function
 LIBAOM_TEST_SRCS-yes                   += idct8x8_test.cc
 LIBAOM_TEST_SRCS-yes                   += partial_idct_test.cc
@@ -99,31 +99,31 @@ endif
 LIBAOM_TEST_SRCS-yes                   += convolve_test.cc
 LIBAOM_TEST_SRCS-yes                   += lpf_8_test.cc
 LIBAOM_TEST_SRCS-yes                   += intrapred_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += dct16x16_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += dct32x32_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += fdct4x4_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += fdct8x8_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += variance_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += quantize_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += subtract_test.cc
-
-ifeq ($(CONFIG_VP10_ENCODER),yes)
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct16x16_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += dct32x32_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fdct4x4_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += fdct8x8_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += variance_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += quantize_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += subtract_test.cc
+
+ifeq ($(CONFIG_AV1_ENCODER),yes)
 LIBAOM_TEST_SRCS-$(CONFIG_SPATIAL_SVC) += svc_test.cc
 endif
 
-ifeq ($(CONFIG_VP10_ENCODER)$(CONFIG_VP10_TEMPORAL_DENOISING),yesyes)
+ifeq ($(CONFIG_AV1_ENCODER)$(CONFIG_AV1_TEMPORAL_DENOISING),yesyes)
 LIBAOM_TEST_SRCS-$(HAVE_SSE2) += denoiser_sse2_test.cc
 endif
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += arf_freq_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += arf_freq_test.cc
 
 LIBAOM_TEST_SRCS-yes                    += av1_inv_txfm_test.cc
-LIBAOM_TEST_SRCS-$(CONFIG_VP10_ENCODER) += av1_dct_test.cc
+LIBAOM_TEST_SRCS-$(CONFIG_AV1_ENCODER) += av1_dct_test.cc
 
-endif # VP10
+endif # AV1
 
 ## Multi-codec / unconditional whitebox tests.
 
-ifeq ($(findstring yes,$(CONFIG_VP10_ENCODER)$(CONFIG_VP10_ENCODER)),yes)
+ifeq ($(findstring yes,$(CONFIG_AV1_ENCODER)$(CONFIG_AV1_ENCODER)),yes)
 LIBAOM_TEST_SRCS-yes += avg_test.cc
 endif
 
diff --git a/test/test_libaom.cc b/test/test_libaom.cc
index cfbb188b7e2c34182fa79660829901e73cad87aa..c8ea59b3efc664d2519303ea27a0457a83c01402 100644
--- a/test/test_libaom.cc
+++ b/test/test_libaom.cc
@@ -16,9 +16,9 @@
 #include "aom_ports/x86.h"
 #endif
 extern "C" {
-#if CONFIG_VP10
+#if CONFIG_AV1
 extern void av1_rtcd();
-#endif  // CONFIG_VP10
+#endif  // CONFIG_AV1
 extern void aom_dsp_rtcd();
 extern void aom_scale_rtcd();
 }
@@ -54,9 +54,9 @@ int main(int argc, char **argv) {
 // Shared library builds don't support whitebox tests
 // that exercise internal symbols.
 
-#if CONFIG_VP10
+#if CONFIG_AV1
   av1_rtcd();
-#endif  // CONFIG_VP10
+#endif  // CONFIG_AV1
   aom_dsp_rtcd();
   aom_scale_rtcd();
 #endif  // !CONFIG_SHARED
diff --git a/test/tile_independence_test.cc b/test/tile_independence_test.cc
index b5d929f47441eb9b379e7f56ee1106a08302a9e9..dad2dcd26f2cd8efd7802cf7001dc9d9450037f2 100644
--- a/test/tile_independence_test.cc
+++ b/test/tile_independence_test.cc
@@ -100,6 +100,6 @@ TEST_P(TileIndependenceTest, MD5Match) {
   ASSERT_STREQ(md5_fw_str, md5_inv_str);
 }
 
-VP10_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
+AV1_INSTANTIATE_TEST_CASE(TileIndependenceTest, ::testing::Range(0, 2, 1));
 
 }  // namespace
diff --git a/test/tools_common.sh b/test/tools_common.sh
index e79df8e90e1269f702fc81640cddbe73e7c70d47..b931e347e537924d05ef6ed21b3467b4de1af3fb 100755
--- a/test/tools_common.sh
+++ b/test/tools_common.sh
@@ -173,15 +173,15 @@ aom_tool_available() {
 }
 
 # Echoes yes to stdout when aom_config_option_enabled() reports yes for
-# CONFIG_VP10_DECODER.
-vp10_decode_available() {
-  [ "$(aom_config_option_enabled CONFIG_VP10_DECODER)" = "yes" ] && echo yes
+# CONFIG_AV1_DECODER.
+av1_decode_available() {
+  [ "$(aom_config_option_enabled CONFIG_AV1_DECODER)" = "yes" ] && echo yes
 }
 
 # Echoes yes to stdout when aom_config_option_enabled() reports yes for
-# CONFIG_VP10_ENCODER.
-vp10_encode_available() {
-  [ "$(aom_config_option_enabled CONFIG_VP10_ENCODER)" = "yes" ] && echo yes
+# CONFIG_AV1_ENCODER.
+av1_encode_available() {
+  [ "$(aom_config_option_enabled CONFIG_AV1_ENCODER)" = "yes" ] && echo yes
 }
 # CONFIG_WEBM_IO.
 webm_io_available() {
diff --git a/test/variance_test.cc b/test/variance_test.cc
index 02c061ab0d3156283debfe98580a7d1a5815a535..d8da018f654cd66b58213304b62277d275239f5a 100644
--- a/test/variance_test.cc
+++ b/test/variance_test.cc
@@ -89,13 +89,13 @@ static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref, int l2w,
                src[w * y * src_stride_coeff + x];
         se += diff;
         sse += diff * diff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         diff = CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x] -
                CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x];
         se += diff;
         sse += diff * diff;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -137,7 +137,7 @@ static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
         const int diff = r - src[w * y + x];
         se += diff;
         sse += diff * diff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
         uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
@@ -151,7 +151,7 @@ static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
         const int diff = r - src16[w * y + x];
         se += diff;
         sse += diff * diff;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -226,12 +226,12 @@ class VarianceTest : public ::testing::TestWithParam<
     if (!use_high_bit_depth_) {
       src_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_ * 2));
       ref_ = new uint8_t[block_size_ * 2];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
           aom_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
       ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     ASSERT_TRUE(src_ != NULL);
     ASSERT_TRUE(ref_ != NULL);
@@ -241,11 +241,11 @@ class VarianceTest : public ::testing::TestWithParam<
     if (!use_high_bit_depth_) {
       aom_free(src_);
       delete[] ref_;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       aom_free(CONVERT_TO_SHORTPTR(src_));
       delete[] CONVERT_TO_SHORTPTR(ref_);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     libaom_test::ClearSystemState();
   }
@@ -273,20 +273,20 @@ void VarianceTest<VarianceFunctionType>::ZeroTest() {
   for (int i = 0; i <= 255; ++i) {
     if (!use_high_bit_depth_) {
       memset(src_, i, block_size_);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       aom_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
                    block_size_);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     for (int j = 0; j <= 255; ++j) {
       if (!use_high_bit_depth_) {
         memset(ref_, j, block_size_);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         aom_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
                      block_size_);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       unsigned int sse;
       unsigned int var;
@@ -304,11 +304,11 @@ void VarianceTest<VarianceFunctionType>::RefTest() {
       if (!use_high_bit_depth_) {
         src_[j] = rnd_.Rand8();
         ref_[j] = rnd_.Rand8();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() && mask_;
         CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() && mask_;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
     unsigned int sse1, sse2;
@@ -335,11 +335,11 @@ void VarianceTest<VarianceFunctionType>::RefStrideTest() {
       if (!use_high_bit_depth_) {
         src_[src_ind] = rnd_.Rand8();
         ref_[ref_ind] = rnd_.Rand8();
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() && mask_;
         CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() && mask_;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
     unsigned int sse1, sse2;
@@ -363,13 +363,13 @@ void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
     memset(src_, 255, block_size_);
     memset(ref_, 255, half);
     memset(ref_ + half, 0, half);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
   } else {
     aom_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
                  block_size_);
     aom_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
     aom_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
   }
   unsigned int sse;
   unsigned int var;
@@ -501,7 +501,7 @@ static uint32_t subpel_avg_variance_ref(const uint8_t *ref, const uint8_t *src,
             ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
         se += diff;
         sse += diff * diff;
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
         uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
@@ -516,7 +516,7 @@ static uint32_t subpel_avg_variance_ref(const uint8_t *ref, const uint8_t *src,
         const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
         se += diff;
         sse += diff * diff;
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
     }
   }
@@ -554,7 +554,7 @@ class SubpelVarianceTest
       src_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_));
       sec_ = reinterpret_cast<uint8_t *>(aom_memalign(16, block_size_));
       ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
           aom_memalign(16, block_size_ * sizeof(uint16_t))));
@@ -562,7 +562,7 @@ class SubpelVarianceTest
           aom_memalign(16, block_size_ * sizeof(uint16_t))));
       ref_ =
           CONVERT_TO_BYTEPTR(new uint16_t[block_size_ + width_ + height_ + 1]);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     ASSERT_TRUE(src_ != NULL);
     ASSERT_TRUE(sec_ != NULL);
@@ -574,12 +574,12 @@ class SubpelVarianceTest
       aom_free(src_);
       delete[] ref_;
       aom_free(sec_);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
     } else {
       aom_free(CONVERT_TO_SHORTPTR(src_));
       delete[] CONVERT_TO_SHORTPTR(ref_);
       aom_free(CONVERT_TO_SHORTPTR(sec_));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
     }
     libaom_test::ClearSystemState();
   }
@@ -611,7 +611,7 @@ void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           ref_[j] = rnd_.Rand8();
         }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         for (int j = 0; j < block_size_; j++) {
           CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
@@ -619,7 +619,7 @@ void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
         }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -647,14 +647,14 @@ void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
         memset(src_ + half, 255, half);
         memset(ref_, 255, half);
         memset(ref_ + half, 0, half + width_ + height_ + 1);
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         aom_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
         aom_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
         aom_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
         aom_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
                      half + width_ + height_ + 1);
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -681,7 +681,7 @@ void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           ref_[j] = rnd_.Rand8();
         }
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
       } else {
         for (int j = 0; j < block_size_; j++) {
           CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
@@ -690,7 +690,7 @@ void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
         for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
           CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
         }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
       }
       unsigned int sse1, sse2;
       unsigned int var1;
@@ -787,7 +787,7 @@ INSTANTIATE_TEST_CASE_P(
                       make_tuple(2, 3, &aom_sub_pixel_avg_variance4x8_c, 0),
                       make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_c, 0)));
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
 typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
 typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
@@ -947,7 +947,7 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(3, 2, &aom_highbd_12_sub_pixel_avg_variance8x4_c, 12),
         make_tuple(2, 3, &aom_highbd_12_sub_pixel_avg_variance4x8_c, 12),
         make_tuple(2, 2, &aom_highbd_12_sub_pixel_avg_variance4x4_c, 12)));
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 
 #if HAVE_MMX
 INSTANTIATE_TEST_CASE_P(MMX, VpxMseTest,
@@ -1034,7 +1034,7 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(2, 2, &aom_sub_pixel_avg_variance4x4_sse, 0)));
 #endif  // CONFIG_USE_X86INC
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 /* TODO(debargha): This test does not support the highbd version
 INSTANTIATE_TEST_CASE_P(
     SSE2, VpxHBDMseTest,
@@ -1160,7 +1160,7 @@ INSTANTIATE_TEST_CASE_P(
         make_tuple(3, 3, &aom_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
         make_tuple(3, 2, &aom_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
 #endif  // CONFIG_USE_X86INC
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
 #endif  // HAVE_SSE2
 
 #if HAVE_SSSE3
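The `variance_ref` and `subpel_variance_ref` hunks above accumulate a signed error sum (`se`) and a sum of squared errors (`sse`) per pixel; the surrounding reference code, not visible in these hunks, presumably finishes with the usual sse minus squared-mean-error formula. A minimal 8-bit sketch under that assumption, with the block dimensions passed as log2 values the way the test parameters are.

```cpp
#include <cstdint>

// Reference variance for an 8-bit block: accumulate the signed error sum and
// the sum of squared errors, then subtract the integer squared-mean term.
// The hunks above only show the accumulation loop, so treat the final line
// as an assumption about the rest of the reference function.
static uint32_t variance_ref_8bit(const uint8_t *src, const uint8_t *ref,
                                  int l2w, int l2h, int stride) {
  const int w = 1 << l2w;
  const int h = 1 << l2h;
  int64_t se = 0;
  uint64_t sse = 0;
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int diff = ref[y * stride + x] - src[y * stride + x];
      se += diff;
      sse += diff * diff;
    }
  }
  return static_cast<uint32_t>(sse - ((se * se) >> (l2w + l2h)));
}
```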
diff --git a/third_party/libwebm/mkvmuxer.cpp b/third_party/libwebm/mkvmuxer.cpp
index 9be3119a4603311635058d3d1bd32da7a41ae913..bf19f041d2855a298da4a48cec405d198fc34456 100644
--- a/third_party/libwebm/mkvmuxer.cpp
+++ b/third_party/libwebm/mkvmuxer.cpp
@@ -1045,7 +1045,7 @@ const char Tracks::kOpusCodecId[] = "A_OPUS";
 const char Tracks::kVorbisCodecId[] = "A_VORBIS";
 const char Tracks::kVp8CodecId[] = "V_VP8";
 const char Tracks::kVp9CodecId[] = "V_VP9";
-const char Tracks::kVp10CodecId[] = "V_VP10";
+const char Tracks::kAV1CodecId[] = "V_AV1";
 
 Tracks::Tracks() : track_entries_(NULL), track_entries_size_(0) {}
 
diff --git a/third_party/libwebm/mkvmuxer.hpp b/third_party/libwebm/mkvmuxer.hpp
index 03a002c93b3b7bf881e6124075483331b4c936fb..27e0a3d7db617b9e8f573e0c54da81acf209f594 100644
--- a/third_party/libwebm/mkvmuxer.hpp
+++ b/third_party/libwebm/mkvmuxer.hpp
@@ -533,7 +533,7 @@ class Tracks {
   static const char kVorbisCodecId[];
   static const char kVp8CodecId[];
   static const char kVp9CodecId[];
-  static const char kVp10CodecId[];
+  static const char kAV1CodecId[];
 
   Tracks();
   ~Tracks();
diff --git a/tools_common.c b/tools_common.c
index dcec1ab30cc20a710aed29b3f89207c15d864a05..8d2c76464caecb338b28990e9bae6f8253172cf4 100644
--- a/tools_common.c
+++ b/tools_common.c
@@ -17,11 +17,11 @@
 
 #include "./tools_common.h"
 
-#if CONFIG_VP10_ENCODER
+#if CONFIG_AV1_ENCODER
 #include "aom/vp8cx.h"
 #endif
 
-#if CONFIG_VP10_DECODER
+#if CONFIG_AV1_DECODER
 #include "aom/vp8dx.h"
 #endif
 
@@ -131,8 +131,8 @@ int read_yuv_frame(struct VpxInputContext *input_ctx, aom_image_t *yuv_frame) {
 #if CONFIG_ENCODERS
 
 static const VpxInterface aom_encoders[] = {
-#if CONFIG_VP10_ENCODER
-  { "vp10", VP10_FOURCC, &aom_codec_vp10_cx },
+#if CONFIG_AV1_ENCODER
+  { "av1", AV1_FOURCC, &aom_codec_av1_cx },
 #endif
 };
 
@@ -158,8 +158,8 @@ const VpxInterface *get_aom_encoder_by_name(const char *name) {
 #if CONFIG_DECODERS
 
 static const VpxInterface aom_decoders[] = {
-#if CONFIG_VP10_DECODER
-  { "vp10", VP10_FOURCC, &aom_codec_vp10_dx },
+#if CONFIG_AV1_DECODER
+  { "av1", AV1_FOURCC, &aom_codec_av1_dx },
 #endif
 };
 
@@ -260,7 +260,7 @@ double sse_to_psnr(double samples, double peak, double sse) {
 }
 
 // TODO(debargha): Consolidate the functions below into a separate file.
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 static void highbd_img_upshift(aom_image_t *dst, aom_image_t *src,
                                int input_shift) {
   // Note the offset is 1 less than half.
@@ -451,4 +451,4 @@ void aom_img_downshift(aom_image_t *dst, aom_image_t *src, int down_shift) {
     lowbd_img_downshift(dst, src, down_shift);
   }
 }
-#endif  // CONFIG_VPX_HIGHBITDEPTH
+#endif  // CONFIG_AOM_HIGHBITDEPTH
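The interface tables above register the codec under the name "av1", and the hunk context shows the `get_aom_encoder_by_name()` lookup. A minimal usage sketch, assuming it is built inside this tree and linked against tools_common.c; the `fourcc` field access is inferred from the table initializer, so treat the field name as an assumption.

```cpp
#include <cstdio>

extern "C" {
#include "./tools_common.h"
}

int main() {
  // "av1" is the name registered in the aom_encoders[] table above; the
  // lookup returns NULL when CONFIG_AV1_ENCODER is disabled.
  const VpxInterface *encoder = get_aom_encoder_by_name("av1");
  if (encoder == NULL) {
    fprintf(stderr, "av1 encoder not compiled in\n");
    return 1;
  }
  // Field name assumed; the table initializer pairs the name with AV1_FOURCC.
  printf("found av1 encoder, fourcc 0x%08x\n", (unsigned)encoder->fourcc);
  return 0;
}
```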
diff --git a/tools_common.h b/tools_common.h
index 4e7ef070edaa9032bc1de0a85e65aae5a01dc0de..1fd39d16e2c5bdf880c2821bd613c8a248aeca5f 100644
--- a/tools_common.h
+++ b/tools_common.h
@@ -63,7 +63,7 @@
 
 #define VP8_FOURCC 0x30385056
 #define VP9_FOURCC 0x30395056
-#define VP10_FOURCC 0x303a5056
+#define AV1_FOURCC 0x303a5056
 
 enum VideoFileType {
   FILE_TYPE_RAW,
@@ -152,7 +152,7 @@ int aom_img_read(aom_image_t *img, FILE *file);
 
 double sse_to_psnr(double samples, double peak, double mse);
 
-#if CONFIG_VPX_HIGHBITDEPTH
+#if CONFIG_AOM_HIGHBITDEPTH
 void aom_img_upshift(aom_image_t *dst, aom_image_t *src, int input_shift);
 void aom_img_downshift(aom_image_t *dst, aom_image_t *src, int down_shift);
 void aom_img_truncate_16_to_8(aom_image_t *dst, aom_image_t *src);
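The FOURCC hunk above renames the macro but keeps the 0x303a5056 value carried over from VP10_FOURCC, so the four bytes written into container headers are unchanged by this patch. A small self-contained check of what those bytes are, purely for illustration.

```cpp
#include <cstdint>
#include <cstdio>

// Value taken unchanged from the hunk above (formerly VP10_FOURCC).
#define AV1_FOURCC 0x303a5056

int main() {
  const uint32_t fourcc = AV1_FOURCC;
  // FOURCC tags are written little-endian, so print from the low byte up.
  for (int i = 0; i < 4; ++i)
    putchar(static_cast<char>((fourcc >> (8 * i)) & 0xff));
  printf("  (0x%08x)\n", static_cast<unsigned>(fourcc));
  return 0;
}
```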
diff --git a/webmdec.cc b/webmdec.cc
index 87c29a49e4f45a5d6d8298167d58e82b61d4e777..54fe0814ca7fd26b6b152ac0704871dab0a435af 100644
--- a/webmdec.cc
+++ b/webmdec.cc
@@ -103,8 +103,8 @@ int file_is_webm(struct WebmInputContext *webm_ctx,
     aom_ctx->fourcc = VP8_FOURCC;
   } else if (!strncmp(video_track->GetCodecId(), "V_VP9", 5)) {
     aom_ctx->fourcc = VP9_FOURCC;
-  } else if (!strncmp(video_track->GetCodecId(), "V_VP10", 6)) {
-    aom_ctx->fourcc = VP10_FOURCC;
+  } else if (!strncmp(video_track->GetCodecId(), "V_AV1", 5)) {
+    aom_ctx->fourcc = AV1_FOURCC;
   } else {
     rewind_and_reset(webm_ctx, aom_ctx);
     return 0;
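The codec-id check above is a prefix comparison with an explicit length, and "V_AV1" is five characters, which is why the comparison length drops to 5 (a carried-over 6 would also have compared the terminating NUL). A minimal sketch of the same dispatch in isolation, with the FOURCC values taken from tools_common.h in this patch; the helper name is made up for illustration.

```cpp
#include <cstdint>
#include <cstring>

// FOURCC values as defined in tools_common.h in this patch.
#define VP8_FOURCC 0x30385056
#define VP9_FOURCC 0x30395056
#define AV1_FOURCC 0x303a5056

// Hypothetical helper mirroring the dispatch in file_is_webm(): map a WebM
// CodecID string to a FOURCC by prefix, returning 0 when unrecognized.
static uint32_t codec_id_to_fourcc(const char *codec_id) {
  if (!strncmp(codec_id, "V_VP8", 5)) return VP8_FOURCC;
  if (!strncmp(codec_id, "V_VP9", 5)) return VP9_FOURCC;
  if (!strncmp(codec_id, "V_AV1", 5)) return AV1_FOURCC;
  return 0;
}

int main() { return codec_id_to_fourcc("V_AV1") == AV1_FOURCC ? 0 : 1; }
```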
diff --git a/webmenc.cc b/webmenc.cc
index 3b475bb9892ce4b6a6e2cb41081422db75c5e2a9..75cceeb74ba6c246dfb01887c23f47675f257caf 100644
--- a/webmenc.cc
+++ b/webmenc.cc
@@ -50,8 +50,8 @@ void write_webm_file_header(struct EbmlGlobal *glob,
   switch (fourcc) {
     case VP8_FOURCC: codec_id = "V_VP8"; break;
     case VP9_FOURCC: codec_id = "V_VP9"; break;
-    case VP10_FOURCC: codec_id = "V_VP10"; break;
-    default: codec_id = "V_VP10"; break;
+    case AV1_FOURCC: codec_id = "V_AV1"; break;
+    default: codec_id = "V_AV1"; break;
   }
   video_track->set_codec_id(codec_id);
   if (par->numerator > 1 || par->denominator > 1) {